PIL Image fails to plot a blank image from an ndarray - python

I want to create a blank image, so I first tried converting an ndarray to a PIL Image; however, it generates the wrong image. Why does this strange thing happen?
from PIL import Image
import numpy as np
x = np.ones((256, 256)).astype(bool)
img = Image.new('1', (256, 256), 1)
y = np.asarray(img)
print((x == y).all())
# x equals y, but the two plots show different pictures - why?
img1 = Image.fromarray(x, mode='1')
img1.show()
img2 = Image.fromarray(y, mode='1')
img2.show()

Change the mode to 8-bit pixels ('L') not only in Image.new but also in Image.fromarray. The underlying problem is that mode '1' stores 8 pixels per byte (packed bits), while a numpy bool array stores one byte per value, so Image.fromarray(x, mode='1') misreads the buffer:
from PIL import Image
import numpy as np
x = np.ones((256, 256)).astype(bool)
img = Image.new('L', (256, 256), 1)
y = np.asarray(img)
img1 = Image.fromarray(x, mode='L')
img1.show()
img2 = Image.fromarray(y, mode='L')
img2.show()
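If you really need a 1-bit image, a safer route (a minimal sketch, assuming Pillow and numpy) is to build an 8-bit image first and let Pillow do the conversion:
from PIL import Image
import numpy as np
x = np.ones((256, 256), dtype=bool)
# scale bool -> uint8 (0 or 255), let fromarray infer mode 'L', then convert to '1';
# this avoids handing fromarray() raw bool bytes with mode='1'
img1 = Image.fromarray(x.astype(np.uint8) * 255).convert('1')
img1.show()  # solid white, as expected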

Related

Python making Hologram pyramid

I am studying hologram vision. I want to place 4 images onto a black screen.
I wrote this code:
import numpy as np
import cv2
from screeninfo import get_monitors
if __name__ == '__main__':
    screen = get_monitors()[0]
    print(screen)
    width, height = screen.width, screen.height
    image = np.zeros((height, width, 3), dtype=np.float64)
    image[:, :] = 0  # black screen
    img = cv2.imread("newBen.png")
    p = 0.25
    w = int(img.shape[1])
    h = int(img.shape[0])
    new_img = cv2.resize(img, (w, h))
    image[:h, :w] = new_img
    window_name = 'projector'
    cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
    cv2.moveWindow(window_name, screen.x - 1, screen.y - 1)
    cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)
    cv2.imshow(window_name, image)
    cv2.waitKey()
    cv2.destroyAllWindows()
But my image looks like this. How can I fix it?
The dtype of a normal RGB image is uint8, not float64. cv2.imshow multiplies floating-point pixel values by 255 (it assumes the range 0.0-1.0), so your 0-255 float data is rendered almost entirely white. Use:
image = np.zeros((height, width, 3), dtype=np.uint8)
Btw: you don't have to set image[:, :] = 0 # black screen. That has already been done by np.zeros.
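Alternatively, if you want to keep a float64 canvas (a minimal sketch, not required once you switch to uint8), scale the pixel data into the 0.0-1.0 range that cv2.imshow expects for floating-point images:
# assuming new_img is the uint8 BGR image loaded and resized above
image = np.zeros((height, width, 3), dtype=np.float64)
image[:h, :w] = new_img / 255.0  # floats must lie in [0.0, 1.0] for cv2.imshow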

make all images' shape the same in python

I wanted to use some images for CNN classification. However, the problem is that the image shapes differ. For example:
for i in range(1, len(x_train)):
    print(print(x_train_resize[i].shape))
This shows the shapes of all the images I am using. It gives the output below (the interleaved None lines come from the nested print(print(...)): the inner print returns None, which the outer print then prints):
None
(100, 100)
None
(100, 100)
None
(100, 100, 3)
None
(100, 100, 4)
As shown above, is there a way to make the shapes of all my images the same, either
(100, 100, 1) or (100, 100, 3)?
(100, 100) means a grayscale image.
(100, 100, 3) means an RGB image.
(100, 100, 4) means an RGBA image.
If you have a numpy grayscale image img_gray with shape (100, 100), then you can duplicate the layer to create (100, 100, 3) like in RGB:
img_rgb = np.dstack((img_gray, img_gray, img_gray))
If you add an alpha layer with the value 255, then you get (100, 100, 4) like in RGBA:
alpha = np.ones((100, 100), dtype='uint8') * 255
img_rgba = np.dstack((img_rgb, alpha))
If you have img_rgba with (100, 100, 4), then you can drop the alpha layer to get img_rgb:
img_rgb = img_rgba[:,:,:3]
To convert RGB to grayscale you could average the channels (cast to a wider type first, so the uint8 sum doesn't overflow):
img_gray = ((img_rgb[:,:,0].astype('uint16') + img_rgb[:,:,1] + img_rgb[:,:,2]) // 3).astype('uint8')
but a better formula is the luma weighting GRAY = 0.2126 * R + 0.7152 * G + 0.0722 * B
img_gray = (0.2126 * img_rgb[:,:,0] + 0.7152 * img_rgb[:,:,1] + 0.0722 * img_rgb[:,:,2]).astype('uint8')
Wikipedia: Converting colour to greyscale
If you use OpenCV, which also works on numpy arrays, then it has functions to convert colors.
gray to RGB
img_rgb = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2RGB)
gray to RGBA
img_rgba = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2RGBA)
RGB to RGBA
img_rgba = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2RGBA)
and in the other direction
RGB to gray
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
RGBA to gray
img_gray = cv2.cvtColor(img_rgba, cv2.COLOR_RGBA2GRAY)
RGBA to RGB
img_rgb = cv2.cvtColor(img_rgba, cv2.COLOR_RGBA2RGB)
You could also use Pillow's Image.convert, but that takes three steps:
convert the numpy array to a Pillow Image - img = Image.fromarray(array),
convert the color - img = img.convert(...),
convert the Pillow Image back to a numpy array - array = np.asarray(img).
Doc: Image.fromarray()
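Put together, the Pillow round trip looks like this (a minimal sketch, assuming img_rgb is the uint8 RGB array from above):
from PIL import Image
import numpy as np
img = Image.fromarray(img_rgb)   # numpy array -> Pillow Image
img = img.convert('L')           # convert color (here: RGB -> grayscale)
img_gray = np.asarray(img)       # Pillow Image -> numpy array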
EDIT:
Minimal working example
import numpy as np
img_gray = np.zeros((100, 100), dtype='uint8')
# create image with cross
for y in range(100):
    img_gray[y, y] = int(255 * (y / 100))
    img_gray[y, 99-y] = int(255 * (y / 100))
print('img_gray.shape:', img_gray.shape) # (100, 100)
img_rgb = np.dstack((img_gray, img_gray, img_gray))
print('img_rgb.shape:', img_rgb.shape) # (100, 100, 3)
alpha = np.ones((100, 100), dtype='uint8') * 255
img_rgba = np.dstack((img_rgb, alpha))
print('img_rgba.shape:', img_rgba.shape)
import matplotlib.pyplot as plt
plt.imshow(img_gray)
plt.show()
plt.imshow(img_rgb)
plt.show()
plt.imshow(img_rgba)
plt.show()
# --- OpenCV ---
import cv2
img_cv2_rgb = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2RGB)
print('img_cv2_rgb.shape:', img_cv2_rgb.shape)
img_cv2_rgba = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2RGBA)
print('img_cv2_rgba.shape:', img_cv2_rgba.shape)
img_cv2_rgba2 = cv2.cvtColor(img_cv2_rgb, cv2.COLOR_RGB2RGBA)
print('img_cv2_rgba2.shape:', img_cv2_rgba2.shape)
cv2.imshow('gray', img_gray)
cv2.imshow('rgb', img_cv2_rgb)
cv2.imshow('rgba', img_cv2_rgba)
cv2.waitKey(0)
cv2.destroyAllWindows()

How to save multiple images in one folder by using OpenCV?

Here is the code that I'm trying to modify to read and save all the images at once in one folder; however, I got an error when I tried to save them:
import cv2
import glob
import numpy as np
#empty lists
image_list=[]
images = []
for img in glob.glob(r"C:\Users\user\Documents\Dataset\Test\Abnormal_Resize\Abnormal_Crop\*.png"):
    n = cv2.imread(img)
    images.append(n)
    print(img)
#limit list to 236 elements
image_list = image_list[:100]
#Resizing the image for compatibility
for img in image_list:
    image = cv2.resize(image, (500, 600))
    #The initial processing of the image
    #image = cv2.medianBlur(image, 3)
    image_bw = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #The declaration of CLAHE
    #clipLimit -> Threshold for contrast limiting
    clahe = cv2.createCLAHE(clipLimit = 5)
    final_img = clahe.apply(image_bw)
    cv2.imwrite(path+r"C:\Users\user\Documents\Dataset\Test\Abnormal_Resize\Abnormal_Crop\Abnormal_Cntrst\contrast_"+str(i)+".png", final_img)
It seems like there are multiple issues with this code:
i is not defined.
The images list has all the images appended, but while processing you make use of the empty list variable image_list (and image is read before it is ever assigned).
You are probably looking for a solution like this:
import cv2
import glob
import numpy as np
input_path = r"<your_input_folder_path>/*.png"
# make sure below folder already exists
out_path = '<your_output_folder_path>/'
image_paths = list(glob.glob(input_path))
for i, img in enumerate(image_paths):
    image = cv2.imread(img)
    image = cv2.resize(image, (500, 600))
    #The initial processing of the image
    #image = cv2.medianBlur(image, 3)
    image_bw = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #The declaration of CLAHE
    #clipLimit -> Threshold for contrast limiting
    clahe = cv2.createCLAHE(clipLimit=5)
    final_img = clahe.apply(image_bw)
    cv2.imwrite(out_path + f'{i}.png', final_img)
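If you'd rather not create the output folder by hand (a small optional addition, using only the standard library), you can let Python make it before the loop:
import os
os.makedirs(out_path, exist_ok=True)  # create the output folder if it does not exist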

How do I show an image made from a pixel map?

Ok, so I tried to edit an image and turn the edited result into a pixel map.
That worked well, but I just don't know how to turn that pixel map into an actual image and show it.
This is my code:
from PIL import Image
import numpy as np
img = Image.open('sample.jpg')
pixels = img.load()
for i in range(img.size[0]):  # for every pixel:
    for j in range(img.size[1]):
        if pixels[i,j] == (255, 0, 0):
            pixels[i,j] = (0, 0, 0)
im2 = Image.fromarray(pixels)
im2.show()
Also, I'm getting this error message:
Traceback (most recent call last):
File "C:\Users\Haris.Sabotic\Desktop\Image Color Inverter\code.py", line 15, in <module>
im2 = Image.fromarray(pixels)
File "C:\Users\Haris.Sabotic\AppData\Local\Programs\Python\Python37-32\lib\site-packages\PIL\Image.py", line 2508, in fromarray
arr = obj.__array_interface__
AttributeError: 'PixelAccess' object has no attribute '__array_interface__'
You don't have to convert pixels back to an image, because pixels modifies the values in the original img in place - so you just have to display it with img.show():
from PIL import Image
img = Image.open('sample.jpg')
pixels = img.load()
width, height = img.size
for col in range(width):
    for row in range(height):
        if pixels[col,row] == (255, 0, 0):
            pixels[col,row] = (0, 0, 0)
img.show()
EDIT: You only need fromarray() when you convert the image to a numpy array first:
from PIL import Image
import numpy as np
img1 = Image.open('sample.jpg')
pixels = np.array(img1)
pixels[np.all(pixels == (255, 0, 0), axis=-1)] = (0,0,0)
img2 = Image.fromarray(pixels)
img2.show()
BTW: if you have to get a pixel from the numpy array, then remember that the array uses [row, col] instead of Pillow's [col, row].
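A quick illustration of the swapped index order (a minimal sketch, reusing sample.jpg from above):
from PIL import Image
import numpy as np
img = Image.open('sample.jpg')
px = img.load()       # PixelAccess object
arr = np.array(img)   # numpy array
x, y = 10, 20         # x = column, y = row
print(px[x, y])       # PixelAccess indexes as [col, row]
print(arr[y, x])      # numpy indexes as [row, col] - the same pixel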

Image standardisation and normalisation using Numpy and OpenCV

I am trying to standardise and then normalise an image using Numpy and OpenCV in the following manner; however, the image output from matplotlib looks identical. Why is that?
Code
%matplotlib inline
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
IMG_SIZE = 256
def show_img(img):
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    imgplot = plt.imshow(img_rgb)
img = cv2.imread('/content/drive/My Drive/ai/test_images/test_image3.tif')
img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
show_img(img)
img = img.astype('float32')
img = (img - img.mean(axis=(0, 1, 2), keepdims=True)) / img.std(axis=(0, 1, 2), keepdims=True)
img = cv2.normalize(img, None, 0, 1, cv2.NORM_MINMAX)
show_img(img)
It is normal behaviour. Normalising an image changes its range from 0-255 to 0-1, and matplotlib checks the range of the RGB values and displays the image accordingly, so the rendered output looks the same.
As for standardisation, if you look closely you can see a colour shift: orange seems a little lighter in the second image.
These behaviours are normal, because preprocessing should not change the image (at least the information inside it) but should help the model to process them all consistently.
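One way to convince yourself that the preprocessing took effect (a small sketch using the variables above) is to print the value ranges instead of relying on the rendered image:
print(img.dtype, img.min(), img.max())
# before preprocessing: uint8 with values roughly in 0..255
# after standardising + min-max normalising: float32 with values in 0.0..1.0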
