Dimming image in Python

I am trying to implement a dimming method that dims an image by transforming its pixel values with the equation X_new = 0.5 * X_old^2. I did some googling but could not get it to work. Here is my code:
import math
from PIL import Image
import numpy as np
from skimage import color, io
import matplotlib.pyplot as plt

def load(image_path):
    out = plt.imread(image_path)
    out = out.astype(np.float64) / 255
    return out

def dim_image(image):
    out = image.point(lambda x: x * 0.5)
    return out

def display(img):
    # Show image
    plt.figure(figsize=(5, 5))
    plt.imshow(img)
    plt.axis('off')
    plt.show()

image1 = load(image1_path)
image2 = load(image2_path)
display(image1)
display(image2)
new_image = dim_image(image1)
display(new_image)

You are calling .point on a NumPy array, but .point is a PIL Image method and does not exist on arrays (plt.imread returns an ndarray). I assume you meant to reduce all color values by 50% to dim the image, in which case you can use np.dot (docs):
def dim_image(image):
    out = np.dot(image, 0.5)
    return out
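The answer above halves each pixel value. If instead you want the quadratic formula stated in the question, X_new = 0.5 * X_old^2, a minimal sketch (my addition, not part of the original answer) applied to the normalized float array returned by load would be:
def dim_image_quadratic(image):
    # image is a float array in [0, 1] as returned by load();
    # squaring darkens mid-tones, and the 0.5 factor halves the overall brightness
    return 0.5 * image ** 2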

Related

Is there a correct way to plot this white pixel?

I am a beginner at Python and image processing. I want to plot the white pixels of this image. As far as I know, the pixel value for black is 0 and for white is 255. Here's the image that I want to plot:
The Image
I tried to find and plot the white pixels in the image ndarray with the following commands:
# importing modules
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sys

# load image
img = cv2.imread('thin.png')

# image to nd.array
arr0 = np.array(img)

# finding white pixels
arr1 = np.where(arr0 == 255)

# indexing the tuple; arr1 is a tuple of int64 index arrays
tuple1 = arr1[0]
tuple2 = arr1[1]
tuple3 = arr1[2]

# defining x and y axes
x = np.array(tuple1)
y = np.array(tuple2)
z = np.array(tuple3)

plot = plt.plot(x, y)
plt.show()
This is what I get:
output image
I think it's very noisy, but I don't have a clue why. Thank you very much for your help.
I think there is some confusion about the dimensions of your array arr0. You should look at the indices of the white pixels instead:
import cv2
import numpy as np
import matplotlib.pyplot as plt

# load image
img = cv2.imread('thin.png')

# image to nd.array
arr0 = np.array(img)
height, width, _ = arr0.shape

x = range(width)
y = [height - np.argwhere(arr0[:, i, 0] == 255).mean() for i in x]  # "height -" to reverse the y-axis

plt.plot(x, y)
plt.show()
Note: the mean is taken because a vertical line can contain more than one white pixel (and some columns won't have any, see the picture below).
Output:
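As an aside (my addition, not from the original answer), if the goal is simply to see every white pixel rather than one averaged height per column, a scatter plot of the indices found by np.where is a minimal alternative:
import cv2
import numpy as np
import matplotlib.pyplot as plt

img = cv2.imread('thin.png')
rows, cols, _ = np.where(img == 255)  # row/column indices of white pixels

plt.scatter(cols, img.shape[0] - rows, s=1)  # flip rows so the plot matches the image orientation
plt.show()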

A continuous increase in brightness is seen when a FITS file is opened. How can this be removed?

I am trying to open a FITS file using the AstroPy package in Python. In the code I have basically opened the FITS file and displayed the corresponding FITS object. For some reason a continuous decrease in the brightness of the image is seen. When looking at the 3D graph of the pixel values in the image, we can see an almost constant slope being formed. Why is this the case, and how can it be changed? I can provide any additional information if needed.
Code that displays the FITS image:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator
from astropy.nddata import Cutout2D
from astropy import units
from astropy.io import fits
import glob
from astropy.visualization import astropy_mpl_style
from astropy.visualization import simple_norm
from astropy.utils.data import download_file
from matplotlib.colors import *
from scipy import interpolate
import cv2
import sys

np.set_printoptions(threshold=sys.maxsize)

X, Y = np.mgrid[-1:1:20j, -1:1:20j]
Z = (X + Y) * np.exp(-6.0 * (X * X + Y * Y)) + np.random.rand(X.shape[0])
print(X.shape, Y.shape, Z.shape)

plt.style.use(astropy_mpl_style)

filename = "jw02739-o001_t001_nircam_clear-f187n_segm.fits"
# filename = download_file('https://fits.gsfc.nasa.gov/samples/WFPC2u5780205r_c0fx.fits', cache=True)

data_set = {}
image_data = []
with fits.open(filename) as hdul:
    hdul.info()
    image_data = fits.getdata(filename, ext=1)
    print(image_data.shape)
    print()

# for i in range(len(image_data)):
image = image_data
print(type(image))

# stack the 2D data into 3 channels and rescale to 8-bit for OpenCV display
image3 = np.stack((image,) * 3, axis=-1)
image3 = ((image3 - image3.min()) * (1 / (image3.max() - image3.min()) * 255)).astype('uint8')

# image = np.resize(image, (200, 200))
# print(image)
# print(image3)
# res = cv2.resize(image3, dsize=(864, 510), interpolation=cv2.INTER_CUBIC)

cv2.imshow("Pillars of Creation in OpenCV", image3)
cv2.waitKey()

# plt.figure()
# plt.imshow(image, cmap='binary', norm=simple_norm(image, 'sqrt'))
# plt.show()
How can this continuous decrease in brightness be removed? I want it to display the decompressed version of the FITS file.
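For reference, a minimal sketch of the matplotlib route that is commented out above (assuming the same file and extension), which normalizes the data only for display instead of rescaling it to uint8:
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.visualization import simple_norm

image = fits.getdata("jw02739-o001_t001_nircam_clear-f187n_segm.fits", ext=1)

plt.figure()
plt.imshow(image, cmap='binary', norm=simple_norm(image, 'sqrt'), origin='lower')
plt.colorbar()  # makes any value gradient across the frame easy to inspect
plt.show()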

AttributeError: 'NoneType' object has no attribute 'min'

import cv2
from matplotlib import pyplot as plt
from skimage.util import random_noise

I = cv2.imread('roses.jpg', 0)
gauss = random_noise(I, mode='gaussian', seed=None, clip=True)
plt.subplot(241), plt.imshow(img), plt.title('origin')
plt.subplot(242), plt.imshow(gauss), plt.title('gaussian')
plt.show()
The error comes from skimage's "detect if a signed image was input" check. How do I fix this problem?
/usr/local/lib/python3.7/dist-packages/skimage/util/noise.py in random_noise(image, mode, seed, clip, **kwargs)
    if image.min() < 0:
        low_clip = -1.
    else:
import cv2
from matplotlib import pyplot as plt
from skimage.util import random_noise

img = cv2.imread('roses.jpg', 0)
gauss = random_noise(img, mode='gaussian', seed=None, clip=True)
plt.subplot(241), plt.imshow(img), plt.title('origin')
plt.subplot(242), plt.imshow(gauss), plt.title('gaussian')
plt.show()

if img.min() < 0:
    low_clip = -1.
else:
You are saving the image as I, then using it as img, and the library is then trying to take the min of image. Your namespace is probably cluttered with references to old objects. Be consistent with the name and it should work, provided img has a min method/attribute. If it does not, you need to get at the underlying array that img represents. OpenCV images behave like NumPy arrays, so the following should work:
import numpy as np

if np.min(img) < 0:
    low_clip = -1.
else:
    # do something else
    pass
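Given that the error message mentions 'NoneType', it is also worth noting (my addition, not from the original answer) that cv2.imread returns None when it cannot find or decode the file, and passing that None into random_noise produces exactly this AttributeError. A minimal guard:
import cv2

img = cv2.imread('roses.jpg', 0)
if img is None:
    # the path is wrong or the file could not be decoded
    raise FileNotFoundError("cv2.imread could not load 'roses.jpg'")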

Resize image with skimage library without stretching

Hey, I am trying to resize an image without stretching it, padding it with white pixels instead. I looked around but found nothing specifying how that can be achieved from within skimage. So instead I used numpy to add the extra pixels before the resize as arrays of float(255).
from skimage.io import imread
from skimage.io import imsave
from skimage.transform import resize
from matplotlib import pyplot as plt
from pylab import cm
import numpy as np
from skimage import morphology
from skimage import measure
from scipy import misc

def process(file_):
    im = imread(file_, as_grey=True)
    #im = misc.imread(file_)
    #im = np.fromfile(file_, dtype=np.int64)

    # Filler to avoid stretching
    orig_rows, orig_cols = im.shape
    print(orig_rows, orig_cols)

    if orig_rows < orig_cols:
        for addition in range(0, orig_cols - orig_rows):
            # adding white rows
            lst = np.array(list(float(255) for x in range(0, orig_cols)))
            im = np.vstack((im, lst))

    if orig_rows > orig_cols:
        for addition in range(0, orig_rows - orig_cols):
            # adding white columns
            lst = np.array(list([float(255)] for x in range(0, orig_rows)))
            im = np.hstack((im, lst))

    image = resize(im, (48, 48))
    imsave('test.jpg', im)
    imsave('test1.jpg', image)
    plt.imshow(im, cmap=cm.gray)
    plt.show()
When I view the image with pyplot, it looks like this:
We can see that the columns have been added, but after I save the images with
image = resize(im, (48, 48))
imsave('test.jpg',im)
imsave('test1.jpg',image)
the images look like negatives, and the resized image looks completely white (next to the dark one it is invisible on the site's background). Any ideas?
The code below should work. Note that the padded area's color is not exactly white, so that the image boundaries remain visible in the uploaded example. For white padding, set fill_cval = np.max(img).
def resize_padded(img, new_shape, fill_cval=None, order=1):
    import numpy as np
    from skimage.transform import resize

    if fill_cval is None:
        fill_cval = np.max(img)

    # scale so the image fits inside new_shape without changing its aspect ratio
    ratio = np.min([n / i for n, i in zip(new_shape, img.shape)])
    interm_shape = np.rint([s * ratio for s in img.shape]).astype(int)

    interm_img = resize(img, interm_shape, order=order, cval=fill_cval)

    # paste the resized image into the centre of a canvas filled with fill_cval
    new_img = np.empty(new_shape, dtype=interm_img.dtype)
    new_img.fill(fill_cval)
    pad = [(n - s) >> 1 for n, s in zip(new_shape, interm_shape)]
    new_img[tuple(slice(p, -p, None) if 0 != p else slice(None, None, None)
                  for p in pad)] = interm_img

    return new_img
import numpy as np
import matplotlib.pylab as plt
from skimage.data import astronaut
from skimage.color import rgb2gray  # using luminance
from skimage.io import imsave

img = rgb2gray(astronaut())

# set desired image size
out_size = (30, 100)  # height, width

# set the color of the padded area. Here: "95% luminance"
fill_cval = np.max(img) * 0.95

resized_img = resize_padded(img, out_size, fill_cval=fill_cval)

imsave('img.png', img)
imsave('img_res.png', resized_img)
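As a side note (my addition, not part of the original answer), NumPy's np.pad can also produce the centered padding once the aspect-preserving resize has been done; a minimal sketch, assuming a 2-D float image in [0, 1] where 1.0 is white:
import numpy as np

def pad_to_shape(img, new_shape, fill_cval=1.0):
    # compute how many rows/columns are missing and split them evenly on both sides
    pads = [(int(np.floor((n - s) / 2)), int(np.ceil((n - s) / 2)))
            for n, s in zip(new_shape, img.shape)]
    return np.pad(img, pads, mode='constant', constant_values=fill_cval)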

Python - Grayscale very low with a mask

I'm working on pictures that have been converted to grayscale with:
Image.open('image.png').convert('LA')
I add a mask and plot my picture with it, but while I expect grayscale values between 0 and 255, the values are very low, as you can see below. There must be something wrong with the format. What do I have to do to get values between 0 and 255?
import numpy as np
import Image
import cv2
import matplotlib.pyplot as plt
import numpy.ma as ma

mask = plt.imread("mask.png")
test = plt.imread("1.png")

# I have to force the dimension...
tab = mask[::, ::, 0]
test = test[::, ::, 0]

width, high = tab.shape
matrix = np.reshape(tab, (width, high))

test_mask = np.ma.array(ma.masked_array(test, tab.max() - tab))
And this is the plot:
By using OpenCV it works...
img = cv2.imread('test.png',0)
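For context (my addition, not part of the original question): matplotlib's plt.imread returns PNG data as floats in [0, 1], whereas cv2.imread returns uint8 values in [0, 255], which would explain the low values. A minimal sketch of rescaling the float image:
import numpy as np
import matplotlib.pyplot as plt

test = plt.imread("1.png")                 # float32 in [0, 1] for PNG files
test_255 = (test * 255).astype(np.uint8)   # same data on a 0-255 scale
print(test_255.min(), test_255.max())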
