How do I show an image made from a pixel map? - python

Ok, so I tried to edit an image and turn the edited result into a pixel map.
That worked well, but I just don't know how to turn that pixel map into an actual image and show it.
This is my code:
from PIL import Image
import numpy as np
img = Image.open('sample.jpg')
pixels = img.load()
for i in range(img.size[0]): # for every pixel:
for j in range(img.size[1]):
if pixels[i,j] == (255, 0, 0):
pixels[i,j] = (0, 0 ,0)
im2 = Image.fromarray(pixels)
im2.show()
Also, I'm getting this error message:
Traceback (most recent call last):
File "C:\Users\Haris.Sabotic\Desktop\Image Color Inverter\code.py", line 15, in <module>
im2 = Image.fromarray(pixels)
File "C:\Users\Haris.Sabotic\AppData\Local\Programs\Python\Python37-32\lib\site-packages\PIL\Image.py", line 2508, in fromarray
arr = obj.__array_interface__
AttributeError: 'PixelAccess' object has no attribute '__array_interface__'

You don't have to convert pixels back to image because pixels changes values in original img - so you have to display img.show()
from PIL import Image
img = Image.open('sample.jpg')
pixels = img.load()
width, height = img.size
for col in range(width):
for row in range(height):
if pixels[col,row] == (255, 0, 0):
pixels[col,row] = (0, 0 ,0)
img.show()
EDIT: You need fromarray() when you convert to numpy array
from PIL import Image
import numpy as np
img1 = Image.open('sample.jpg')
pixels = np.array(img1)
pixels[np.all(pixels == (255, 0, 0), axis=-1)] = (0,0,0)
img2 = Image.fromarray(pixels)
img2.show()
BTW: if you have to get pixel in array then remeber that array uses [row,col] instead of [col,row]

Related

Python making Hologram pyramid

I am studying on hologram vision. I want to placed 4 images onto black screen.
I wrote this code:
import numpy as np
import cv2
from screeninfo import get_monitors
if __name__ == '__main__':
screen = get_monitors()[0]
print(screen)
width, height = screen.width, screen.height
image = np.zeros((height, width, 3), dtype=np.float64)
image[:, :] = 0 # black screen
img = cv2.imread("newBen.png")
p = 0.25
w = int(img.shape[1])
h = int(img.shape[0])
new_img = cv2.resize(img, (w, h))
image[:h, :w] = new_img
window_name = 'projector'
cv2.namedWindow(window_name, cv2.WND_PROP_FULLSCREEN)
cv2.moveWindow(window_name, screen.x - 1, screen.y - 1)
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_FULLSCREEN)
cv2.imshow(window_name, image)
cv2.waitKey()
cv2.destroyAllWindows()
But my image looking like this.
How can ı fix it?
The dtype of a normal RGB image is uint8, not float64.
image = np.zeros((height, width, 3), dtype=np.uint8)
Btw: You don't have to set image[:, :] = 0 # black screen. This is already been done by np.zeros.

How to change size of dots?

I want to make grid of dots, but I'm not sure this is the right way to do it.
from PIL import Image, ImageDraw
img = Image.new("RGB", (1000,1000), (0, 0, 0))
draw = ImageDraw.Draw(i)
w, h = img.size
step = 100
for n in range(step,w,step):
for x in range(step,h-step,step):
draw.point((n,x), fill="yellow")
img
Thanks in advance!
You can't pass a size parameter to point, so try ellipse:
from PIL import Image, ImageDraw
img = Image.new("RGB", (1000,1000), (0, 0, 0))
draw = ImageDraw.Draw(img)
w, h = img.size
step = 100
size = 10
for size in [1,25]:
for n in range(step,w,step):
for x in range(step,h-step,step):
draw.ellipse([n-size/2,x-size/2,n+size//2,x+size//2], fill="yellow")
img.save('size_{:d}.png'.format(size))
Which gives the following outputs:

PIL Image fails to plot the blank image from ndarray

I want to create a blank image and first try converting a ndarray to PIL Image, however, generating the wrong image. Why does this strange thing happen?
from PIL import Image
import numpy as np
x = np.ones((256, 256)).astype(bool)
img = Image.new('1', (256, 256), 1)
y = np.asarray(img)
print((x == y).all())
# x equals to y, but ploting different pictures, why?
img1 = Image.fromarray(x, mode='1')
img1.show()
img2 = Image.fromarray(y, mode='1')
img2.show()
Change the mode to 8 bit pixels not only in Image.new but also in Image.fromarray:
from PIL import Image
import numpy as np
x = np.ones((256, 256)).astype(bool)
img = Image.new('L', (256, 256), 1)
y = np.asarray(img)
img1 = Image.fromarray(x, mode='L')
img1.show()
img2 = Image.fromarray(y, mode='L')
img2.show()

Finding only the red pixel in a image and darkening it to more red

I want to find only the red pixel in a image and darkening it to more red, currently i have this basic code, Is there any better way to get very good results, Please do suggest. Thank You
import cv2
import numpy as np
img = cv2.imread('2.jpg', 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
import matplotlib.pyplot as plt
plt.imshow(img)
plt.show()
img = cv2.addWeighted(img, 0.9, np.zeros(img.shape, img.dtype), 0, 0)
rows,cols, val = img.shape
for i in range(rows):
for j in range(cols):
k = img[i,j]
#print(k)
if(k[0] < 255):
k[0] = 255
print(k)
contrast_img = cv2.addWeighted(img, 0.8, np.zeros(img.shape, img.dtype), 0, 0)
plt.imshow(contrast_img)
Input
Expected output
Here is one way to increase the contrast of the red (and all colors) using Python/OpenCV/Skimage. Adjust the factor of min in the skimage.exposure function.
Input:
import cv2
import numpy as np
import skimage.exposure
# read image
img = cv2.imread('red_text.jpg')
# get min and max values
min = np.amin(img)
max = np.amax(img)
print("min=",min, "max=",max)
# increase contrast
result = skimage.exposure.rescale_intensity(img, in_range=(2*min,max), out_range=(0,255))
# save output image
cv2.imwrite('red_text_enhanced.jpg', result)
# display IN and OUT images
cv2.imshow('RESULT', result)
cv2.waitKey(0)
cv2.destroyAllWindows()

PIL Image opening turns white pixels into black pixels

When I open a FULLY WHITE file with pillow (from PIL import Image) and then obtain the color of all of the pixels, you SHOULD see something like [255, 255, 255, 255, 255, 255]..... but instead i only see [0, 0, 0, 0, 0, 0]....., code is as follows:
from PIL import Image
image = Image.open("index.png", "r")
pixels = list(image.getdata())
print(pixels)
Your code doesn't convert white pixels values to black pixels. It somehow represents pixel values in different way. We can check whether it converts white pixel values to black pixels using RGB color domain. Code is shown below:
from PIL import Image
import numpy as np
img = Image.open("index.png") # open colour image
imgRgb = img.convert('RGB')
pixels = list(imgRgb.getdata())
width, height = imgRgb.size
pixels = np.asarray([pixels[i * width:(i + 1) * width] for i in range(height)], dtype=int)

Categories