Python Imaging Library: select random pictures from folders - python

Hi, I need some help with something I'm working on. I have this code that takes two pictures from two different folders and pastes one over the other, creating a single merged image. What I want, though, is to merge two randomly selected pictures from the separate directories. Thanks.
from PIL import Image
import os
import random
import numpy as np
img1 = Image.open("/Users/Liam/Pictures/1/dfd.jpg").convert("RGBA")
img2 = Image.open("/Users/Liam/Pictures/2/face.png").convert("RGBA")
img1.paste(img2, (0,0), mask = img2)
img1.show()

It is actually pretty easy; check my example below:
from PIL import Image
import os
import random
import numpy as np
basedir1 = "/Users/Liam/Pictures/1/"
basedir2 = "/Users/Liam/Pictures/2/"
first_image_list = os.listdir(basedir1)
second_image_list = os.listdir(basedir2)
img1 = Image.open(os.path.join(basedir1, random.choice(first_image_list))).convert("RGBA")
img2 = Image.open(os.path.join(basedir2, random.choice(second_image_list))).convert("RGBA")
img1.paste(img2, (0,0), mask = img2)
img1.show()
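One thing to watch (my addition, not part of the original answer): os.listdir() returns every entry in the folder, including hidden files such as .DS_Store on macOS, which would make Image.open() fail. A small sketch that filters to common image extensions first:

from PIL import Image
import os
import random

basedir1 = "/Users/Liam/Pictures/1/"
basedir2 = "/Users/Liam/Pictures/2/"

# Keep only files with common image extensions, so hidden files
# such as .DS_Store are never handed to random.choice().
IMAGE_EXTS = (".png", ".jpg", ".jpeg", ".gif", ".bmp")

def list_images(folder):
    return [f for f in os.listdir(folder) if f.lower().endswith(IMAGE_EXTS)]

img1 = Image.open(os.path.join(basedir1, random.choice(list_images(basedir1)))).convert("RGBA")
img2 = Image.open(os.path.join(basedir2, random.choice(list_images(basedir2)))).convert("RGBA")

img1.paste(img2, (0, 0), mask=img2)
img1.show()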

Related

Is there a Python algorithm to detect nuclei in GFP cells?

I have been trying for months to make an algorithm that can detect the nuclei in GFP cell scans like this one:
[image: GFP cell scan]
I want it to be able to output:
[image: desired cell tracking]
Is there an existing library for doing this, or is there a way to train my own basic ML classifier for it?
(I attached starter code that detects just the cell masks, but not the nuclei.)
###############################
# This code uses cellpose library to create a mask for each cell in 2 png files of GFP-channel cell scans.
###############################
#~~~~~~~~~~~~~~~~~~~~~~~~~
# GFP cell detection original libraries
from skimage.io import imread
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
import matplotlib as mpl
import fnmatch
mpl.rcParams['figure.dpi'] = 300
from cellpose import utils, io
from skimage.measure import label, regionprops, regionprops_table
import pandas as pd
from PIL import Image, ImageChops
#~~~~~~~~~~~~~~~~~~~~~~~~~
# GFP cell nuclei detection libraries
from skimage import (filters, measure, morphology, segmentation)
from scipy import ndimage as ndi
from skimage import data, color
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
import cv2
#~~~~~~~~~~~~~~~~~~~~~~~~~
# Load images
print("Libraries imported")
GFP_files = []
GFP_files.append('03152021_GFP_cells_1_lowesy_quality.tif')
GFP_files.append('03152021_GFP_cells_2_lowesy_quality.tif')
GFP_files= sorted(GFP_files) #sorting files
plt.figure(figsize=(2,2))
img = np.array(Image.open(GFP_files[0]))
img2 = img
wid = img.shape[0]
hei = img.shape[1]
#~~~~~~~~~~~~~~~~~~~~~~~~~
# Detect GFP cells
from cellpose import models, io
import random
model = models.Cellpose(gpu=True, model_type='cyto')
channels = [2,0]
#~~~~~~~~~~~~~~~~~~~~~~~~~
# LOOPING THROUGH FILES
#defining a null array for number of cells.
imgname=[0]*len(GFP_files)
n_cell= [[2, 0]]*len(GFP_files)
seg_masks=np.zeros((len(GFP_files), wid,hei)) #saving mask as 3d array for all images
i=-1 #for indexing
kernel = np.ones((5,5), np.uint8)
print('Running...')
# THE BIG LOOP
for filename in GFP_files:
    i+=1
    img = np.array(Image.open(filename))
    masks, flows, styles, diams = model.eval(img, diameter=30, channels=channels)
    n_cell[i]= [filename,np.max(masks)]
    seg_masks[i,:,:]= masks
    imgname[i]=[filename]
    im = np.copy(img[:,:,0])
    im[masks==0]=0 #set main background threshold on
# 'im' now is a single-color-channel image that only has one of the cells in it, everything else is background.
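Since the starter code already uses cellpose, one option worth trying for the nuclei themselves (a sketch assuming the pretrained 'nuclei' model behaves reasonably on these scans, not something tested on this data) is to run the same images through cellpose's 'nuclei' model:

from cellpose import models
import numpy as np
from PIL import Image

# Same file as in the starter code above.
img = np.array(Image.open('03152021_GFP_cells_1_lowesy_quality.tif'))

# 'nuclei' is a second pretrained cellpose model; channels=[2, 0] mirrors the
# cyto call above (segment the green GFP channel, no separate nuclear channel).
# The diameter is a guess and should be tuned to the nucleus size in pixels.
nuc_model = models.Cellpose(gpu=True, model_type='nuclei')
nuc_masks, flows, styles, diams = nuc_model.eval(img, diameter=15, channels=[2, 0])

print("Detected nuclei:", np.max(nuc_masks))

If that does not separate the nuclei cleanly, the skimage imports already in the script (filters, morphology, segmentation, ndi) cover the usual fallback of thresholding within each cell mask and splitting touching nuclei with a watershed.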

How to compare images and combine into one image using python

I have a problem as follows: I have about 30 pieces of an image and I want to combine them into one, like this image. Which Python library, or what approach, can do that?
import cv2
import numpy as np
im1 = cv2.imread('data/abc1.jpg')
im2 = cv2.imread('data/abc2.jpg')
im_v = cv2.vconcat([im1, im2])  # stack the two images vertically
cv2.imwrite('data/dst/opencv_vconcat.jpg', im_v)
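To tile roughly 30 pieces into one image, the same idea extends to cv2.hconcat for each row plus cv2.vconcat to stack the rows. A minimal sketch (the filenames, the 5x6 grid shape, and the assumption that every tile has identical dimensions are mine, not from the question):

import cv2

# Hypothetical filenames: data/abc1.jpg ... data/abc30.jpg, arranged
# as 5 rows of 6 tiles. All tiles must have identical dimensions.
tiles = [cv2.imread(f'data/abc{i}.jpg') for i in range(1, 31)]

rows = []
for r in range(5):
    row = cv2.hconcat(tiles[r * 6:(r + 1) * 6])  # join 6 tiles side by side
    rows.append(row)

grid = cv2.vconcat(rows)  # stack the 5 rows vertically
cv2.imwrite('data/dst/combined.jpg', grid)

If the pieces are unordered and have to be matched up first, that is an image-stitching problem rather than simple concatenation; OpenCV's cv2.Stitcher is the usual starting point there.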

Processing and then saving images with a for loop in python

I'm new to programming and I'd like to ask how I can write my code so that it reads all the pictures within a directory, processes them one by one, and then saves the output images in another directory.
%pylab
%matplotlib inline
import cv2
import glob
import os
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
filename = [img for img in glob.glob("outp/*.png")]
flist=sorted(filename)
images = []
for img in flist:
    n = cv2.imread(img, 0)
    images.append(n)
    blur = cv.GaussianBlur(n, (9,9), 0)
    cv.imwrite(flist, blur) # definitely wrong!! but idk how to do it
You don't need to build a list of all the pixel data of every image in memory; that is just wasteful. So, remove the lines:
images = []
and
images.append(...)
Then change your imwrite() to overwrite the current image:
cv2.imwrite(img, blur)
Here's a more complete version:
#!/usr/bin/env python3
import cv2
import glob
import os
# Go to where the images are
os.chdir('outp')
# Get list of filenames to convert
files = glob.glob("*.png")
for file in files:
    n = cv2.imread(file, 0)
    blur = cv2.GaussianBlur(n, (9,9), 0)
    cv2.imwrite('blurred_' + file, blur)
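Since the question asks for the output images to end up in another directory, a small variation of the same loop writes them there instead (the 'outp_blurred' folder name is just an example):

#!/usr/bin/env python3
import cv2
import glob
import os

in_dir = 'outp'
out_dir = 'outp_blurred'           # example output folder name
os.makedirs(out_dir, exist_ok=True)

for path in glob.glob(os.path.join(in_dir, '*.png')):
    n = cv2.imread(path, 0)                        # read as grayscale
    blur = cv2.GaussianBlur(n, (9, 9), 0)
    out_path = os.path.join(out_dir, os.path.basename(path))
    cv2.imwrite(out_path, blur)                    # save into the other directory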
Try this way:
cv2.imwrite("mypicture.jpg", gray[y:y+h,x:x+w])

cv2 threshold does not work correctly on second image

I am new to Python and I was playing around with background subtraction to visualize changes between pre- and post-change images.
I wrote a short and simple script using the cv2 library:
#!/usr/bin/env python
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
#GRAYSCALE ONLY FOR TESTING
#Test with person appearing in image
img1 = cv.imread("images/1.jpg", 0)
img2 = cv.imread("images/2.jpg", 0)
img3 = cv.subtract(img1, img2)
ret,thresh1 = cv.threshold(img3,90,255,cv.THRESH_BINARY)
#Test with satelite image of japan landslide changes after earthquake
jl_before = cv.imread("images/japan_earthquake_before.jpg",0)
jl_after = cv.imread("images/japan_earthquake_after.jpg",0)
jl_subtraction = cv.subtract(jl_before, jl_after)
ret,thresh2 = cv.threshold(img3,20,255,cv.THRESH_BINARY)
images = [img1, img2, thresh1, jl_before, jl_after, thresh2]
titles = ["Image1", "Image2", "Changes", "Japan_Before", "Japan_After", "Japan_Changes" ]
for i in range(6):
    plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
    plt.title(titles[i])
    plt.xticks([]),plt.yticks([])
plt.show()
The result looks like this:
Why is the mask with changes from the first set of images present in the mask of the second set of images?
I used different variables, thresh1 and thresh2.
Any help would be greatly appreciated as I can't seem to find the problem.
Because you missed a change when copy-pasting:
ret,thresh2 = cv.threshold(img3,20,255,cv.THRESH_BINARY)
                           ^^^^
The second call still thresholds img3 instead of jl_subtraction, so thresh2 is computed from the first image pair.
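For completeness, the corrected line simply thresholds the subtraction result that was already computed for the second image pair:

ret,thresh2 = cv.threshold(jl_subtraction,20,255,cv.THRESH_BINARY)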

PIL images converted to RGB, getting saved as plain black images (python)

I have images that I want to save in jpeg format, after shearing and editing them.
This is my function in python:
import numpy as np
from skimage import data, io, filter, color, exposure
import skimage.transform as tf
from skimage.transform import resize, rescale, rotate, setup, warp, AffineTransform
import os
from os import listdir
from os.path import isfile, join
from PIL import Image
def generateHoGSamples(path, readfile):
print "generating samples from " + path+"\\"+readfile
img = color.rgb2gray(io.imread(path+"\\"+readfile))
img = resize(img, (50,100))
filename = os.path.splitext(readfile)[0]
angles = [3, 0, -3]
shears = [0.13, 0.0, -0.13]
imgidx = 0
for myangle in angles:
myimg = rotate(img, angle=myangle, order=2)
for myshear in shears:
imgidx+=1
afine_tf = tf.AffineTransform(shear=myshear)
mymyimg = tf.warp(myimg, afine_tf)
outputimg = Image.fromarray(mymyimg)
# Saving as "jpg" without the following line caused an error
outputimg = outputimg.convert('RGB')
outputimg.save(path+"//"+str(imgidx)+".jpg", "JPEG")
But what happens instead is that all images are nothing but black.
What's the matter here?
Your image mymyimg goes from 0 to 1, while PIL expects an image with values between 0 and 255. When the float values are truncated to integers, the saved JPEG contains only 0s and 1s, which renders as black.
To fix that, just multiply mymyimg by 255, for example:
outputimg = Image.fromarray(mymyimg*255)
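A slightly more explicit variant (my addition, not part of the original answer) scales the array and casts it to uint8 before handing it to PIL, so nothing relies on implicit float truncation; it drops into the inner loop of the question's code:

import numpy as np
from PIL import Image

# mymyimg is the warped float array in [0, 1] from the loop above;
# scale to [0, 255] and cast to uint8 before building the PIL image.
outputimg = Image.fromarray((mymyimg * 255).astype(np.uint8))
outputimg = outputimg.convert('RGB')
outputimg.save(path + "//" + str(imgidx) + ".jpg", "JPEG")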
Hope it helps.
Cheers
