Extracting handwritten text from shapes with OpenCV - Python

I am very new to OpenCV Python and I really need some help here.
What I am trying to do here is to extract out the words in the image below. The words and shapes are all hand drawn, so they are not perfect. I have done some coding, shown below.
First of all, I grayscale the image:
img = cv2.imread(file_name)
img2gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
Then I use THRESH_BINARY_INV to show the content:
ret, new_img = cv2.threshold(img2gray, 100, 255, cv2.THRESH_BINARY_INV)
After which, I dilate the content:
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
dilated = cv2.dilate(new_img, kernel, iterations=3)
I dilate the image so that the text merges into one cluster and can be identified as a single contour.
After that, I apply boundingRect around each contour and draw a rectangle around it:
contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # get contours
for contour in contours:
    # get rectangle bounding contour
    [x, y, w, h] = cv2.boundingRect(contour)
    # don't plot small false positives that aren't text
    if w < 10 or h < 10:
        continue
    # draw rectangle around contour on original image
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)
This is what I got after that.
I am only able to detect one piece of text. I have tried many other methods, but this is the closest result I have got, and it does not fulfil the requirement.
The reason I want to identify the text is so that I can get the X and Y coordinates of each piece of text in this image from its bounding rectangle, boundingRect().
Please help me out. Thank you so much.

You can use the fact that the connected components of the letters are much smaller than the large strokes of the rest of the diagram.
I used the OpenCV 3 connected-components API in the code, but you can do the same thing using findContours.
The code:
import cv2
import numpy as np
# Params
maxArea = 150
minArea = 10
# Read image
I = cv2.imread('i.jpg')
# Convert to gray
Igray = cv2.cvtColor(I, cv2.COLOR_BGR2GRAY)  # imread returns BGR, not RGB
# Threshold
ret, Ithresh = cv2.threshold(Igray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Keep only small components, but not too small
comp = cv2.connectedComponentsWithStats(Ithresh)
labels = comp[1]
labelStats = comp[2]
labelAreas = labelStats[:,4]
for compLabel in range(1, comp[0], 1):
    if labelAreas[compLabel] > maxArea or labelAreas[compLabel] < minArea:
        labels[labels == compLabel] = 0
labels[labels > 0] = 1
# Do dilation
se = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(25,25))
IdilateText = cv2.morphologyEx(labels.astype(np.uint8),cv2.MORPH_DILATE,se)
# Find connected component again
comp = cv2.connectedComponentsWithStats(IdilateText)
# Draw a rectangle around the text
labels = comp[1]
labelStats = comp[2]
#labelAreas = labelStats[:,4]
for compLabel in range(1, comp[0], 1):
    cv2.rectangle(I, (labelStats[compLabel, 0], labelStats[compLabel, 1]),
                  (labelStats[compLabel, 0] + labelStats[compLabel, 2],
                   labelStats[compLabel, 1] + labelStats[compLabel, 3]), (0, 0, 255), 2)
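For reference, here is a rough findContours-based version of the small-component filtering step (an untested sketch of mine, assuming OpenCV 4.x where findContours returns two values; it reuses Ithresh, minArea, and maxArea from above):
contours, _ = cv2.findContours(Ithresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
letters = np.zeros_like(Ithresh)
for c in contours:
    if minArea <= cv2.contourArea(c) <= maxArea:
        cv2.drawContours(letters, [c], -1, 255, -1)  # keep only letter-sized blobs
# `letters` can then be dilated in place of `labels`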

I am getting fractional artifacts (pieces of metal) instead of whole ones - it seems that the edges are not continuous, even though they are

My goal is to detect objects placed on a white surface. From there, count how many there are and calculate the area of each one.
It seems that this algorithm is detecting its edge but counting it as multiple objects.
original picture
picture after edge detection
part of the picture with problems
results
In short, I am using "canny" and "connected components" and I am getting fractional objects instead of whole objects.
The following code should do the job; you might need to tweak minItemArea and maxItemArea to filter objects.
import numpy as np
import cv2
import matplotlib.pyplot as plt
rgb = cv2.imread('/path/to/your/image/items_0001.png')
gray = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
imh, imw = gray.shape
th = cv2.adaptiveThreshold(gray,255, cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,21,5)
contours, hier = cv2.findContours(th.copy(),cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)
out_img = rgb.copy()
minItemArea = 50
maxItemArea = 4000
for i in range(len(contours)):
    if hier[0][i][3] != -1:
        continue
    x, y, w, h = cv2.boundingRect(contours[i])
    if minItemArea < w * h < maxItemArea:
        cv2.drawContours(out_img, [contours[i]], -1, 255, 1)
plt.imshow(out_img)
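Since the stated goal is also to count the objects and compute each one's area, a rough extension of the loop above (my untested sketch, using the same filtering criteria) could be:
kept = []
for i, c in enumerate(contours):
    if hier[0][i][3] != -1:          # skip holes, keep outer contours only
        continue
    x, y, w, h = cv2.boundingRect(c)
    if minItemArea < w * h < maxItemArea:
        kept.append(c)
print('object count:', len(kept))
for c in kept:
    print('area (pixels):', cv2.contourArea(c))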

How to remove hair from skin images using opencv?

I am working on recognition of skin spots. For this, I work with a number of images that contain different kinds of noise. One of these noise sources is hair, because I have images with hairs over the area of the spot (ROI). How can I decrease or remove this type of image noise?
The code below decreases the area where hairs are, but does not remove hairs that are above the area of interest (ROI).
import numpy as np
import cv2
IMD = 'IMD436'
# Read the image and perform an OTSU threshold
img = cv2.imread(IMD+'.bmp')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Remove hair with opening
kernel = np.ones((2,2),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
# Combine surrounding noise with ROI
kernel = np.ones((6,6),np.uint8)
dilate = cv2.dilate(opening,kernel,iterations=3)
# Blur the image for smoother ROI
blur = cv2.blur(dilate,(15,15))
# Perform another OTSU threshold and search for biggest contour
ret, thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
# Create a new mask for the result image
h, w = img.shape[:2]
mask = np.zeros((h, w), np.uint8)
# Draw the contour on the new mask and perform the bitwise operation
cv2.drawContours(mask, [cnt],-1, 255, -1)
res = cv2.bitwise_and(img, img, mask=mask)
# Display the result
cv2.imwrite(IMD+'.png', res)
cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output:
How can I remove hair from the top of my region of interest?
Images used:
I am responding to your tag on a related post. As I understand it, you and a colleague are working together on a project to locate moles on the skin? I think I have already helped one or maybe both of you on similar questions, and I already mentioned that removing the hair is a very tricky and difficult task. If you remove the hair from the image you lose information, and you can't replace that part of the image (no program or algorithm can guess what is under the hair, although it can make an estimation). As I mentioned in other posts, what I think would be the best approach is to learn about deep neural networks and make your own for hair removal. You can google "watermark removal deep neural network" and see what I mean. That being said, your code does not seem to extract all the ROIs (the moles) in your example image. I have made another example of how you can better extract the moles: basically, you should perform closing before converting to binary, and you will get better results.
For the second part - hair removal - if you do not wish to make a neural network, I think an alternative solution could be to calculate the mean pixel intensity of the region that contains the mole, then iterate through every pixel and apply some criterion for how much a pixel may differ from the mean. Hair seems to be represented by pixels that are darker than the mole area, so when you find such a pixel, replace it with a neighbouring pixel that does not fall under the criterion. In the example I have made some simple logic, which will not work with every image, but it can serve as an example. To make a fully operational solution you would need a better, more complex algorithm, which I guess will take quite some time. Hope it helps a bit! Cheers!
import numpy as np
import cv2
from PIL import Image
# Read the image
img = cv2.imread('skin2.png')
kernel = np.ones((15,15),np.uint8)
# Perform closing to remove hair and blur the image
closing = cv2.morphologyEx(img,cv2.MORPH_CLOSE,kernel, iterations = 2)
blur = cv2.blur(closing,(15,15))
# Binarize the image
gray = cv2.cvtColor(blur,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Search for contours and select the biggest one
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
# Create a new mask for the result image
h, w = img.shape[:2]
mask = np.zeros((h, w), np.uint8)
# Draw the contour on the new mask and perform the bitwise operation
cv2.drawContours(mask, [cnt],-1, 255, -1)
res = cv2.bitwise_and(img, img, mask=mask)
# Calculate the mean color of the contour
mean = cv2.mean(res, mask = mask)
print(mean)
# Make some sort of criterion as the ratio hair vs. skin color varies
# thus makes it hard to unify the threshold.
# NOTE that this is only for example and it will not work with all images!!!
if mean[2] > 182:
    bp = mean[0] / 100 * 35
    gp = mean[1] / 100 * 35
    rp = mean[2] / 100 * 35
elif 182 > mean[2] > 160:
    bp = mean[0] / 100 * 30
    gp = mean[1] / 100 * 30
    rp = mean[2] / 100 * 30
elif 160 > mean[2] > 150:
    bp = mean[0] / 100 * 50
    gp = mean[1] / 100 * 50
    rp = mean[2] / 100 * 50
elif 150 > mean[2] > 120:
    bp = mean[0] / 100 * 60
    gp = mean[1] / 100 * 60
    rp = mean[2] / 100 * 60
else:
    bp = mean[0] / 100 * 53
    gp = mean[1] / 100 * 53
    rp = mean[2] / 100 * 53
# Write temporary image
cv2.imwrite('temp.png', res)
# Open the image with PIL and load it to RGB pixelpoints
mask2 = Image.open('temp.png')
pix = mask2.load()
x,y = mask2.size
# Iterate through the image and apply some logic to replace the pixels that
# differ from the mean of the image
# NOTE that this algorithm is an example and will not work with other images
for i in range(0, x):
    for j in range(0, y):
        if -1 < pix[i, j][0] < bp or -1 < pix[i, j][1] < gp or -1 < pix[i, j][2] < rp:
            try:
                # replace with the last "good" pixel seen; falls back to the
                # mean colour before any good pixel has been seen
                pix[i, j] = b, g, r
            except NameError:
                pix[i, j] = (int(mean[0]), int(mean[1]), int(mean[2]))
        else:
            b, g, r = pix[i, j]
# Transform the image back to cv2 format and mask the result
res = np.array(mask2)
res = res[:,:,::-1].copy()
final = cv2.bitwise_and(res, res, mask=mask)
# Display the result
cv2.imshow('img', final)
cv2.waitKey(0)
cv2.destroyAllWindows()
You can try the following steps, at least to get a road map to a proper solution:
1. Find the hair region using adaptive local thresholding - Otsu's method or any other method. I think "local thresholding" or even "local histogram equalization and then global thresholding" will find the hair regions.
2. To fill the hair regions, use "texture synthesis" to synthesize skin-like texture for the hair region.
One good and easy method for texture synthesis is described in "A.A. Efros and T.K. Leung, 'Texture synthesis by non-parametric sampling', In Proceedings of the International Conference on Computer Vision (ICCV), Kerkyra, Greece, 1999".
Texture synthesis will give a better result than averaging or median filtering for estimating the pixels in the hair region.
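If implementing full texture synthesis is too heavy, a rough stand-in (my suggestion, not the paper's method) is a black-hat morphological filter to detect the thin dark hair strands, followed by OpenCV's built-in cv2.inpaint to fill them; the file name, kernel size, and threshold below are assumptions to tune:
import cv2
# Hair strands are thin dark structures; the black-hat transform highlights them
img = cv2.imread('skin.png')  # hypothetical input file
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 17))
blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, kernel)
# Threshold the black-hat response to get a binary hair mask
_, hair_mask = cv2.threshold(blackhat, 10, 255, cv2.THRESH_BINARY)
# Fill the masked pixels from the surrounding skin
clean = cv2.inpaint(img, hair_mask, 3, cv2.INPAINT_TELEA)
cv2.imwrite('skin_clean.png', clean)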
Also, take a look at this paper, it should help you a lot:
http://link.springer.com/article/10.1007%2Fs00521-012-1149-1?LI=true

Remove the selected elements from the image in OpenCV

I have this image with tables, where I want to remove the tabular structure from the image so that Tesseract can work on it more effectively. I used the following code to create a boundary around the table (and around the individual cells) so that it can be deleted.
img =cv2.imread('bfir.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
img1 = np.ones(img.shape, dtype=np.uint8)*255
ret,thresh = cv2.threshold(gray,127,255,1)
(_, contours, h) = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    if len(approx) == 4:
        cv2.drawContours(img1, [cnt], 0, (0, 255, 0), 2)
This draws green lines around the table like this image.
Next, I tried the cv2.subtract method to subtract the table from the image, somewhat like this.
final_img = cv2.subtract(img1, img)
But this didn't work as I expected and gave me a grayscale image with the table still in it. Link
While I just want the original image in B&W, with the table removed. I am using OpenCV for the first time, so I don't know what I am doing wrong. Sorry for the long post, but if anybody can help with how to go about this, or just point me in the right direction for removing the table, it would be very much appreciated.
EDIT:
As suggested by RobAu, it could also work by simply drawing the contours in white in the first place, but I don't know how to do that without losing the rest of the data in the preprocessing stage.
You could try and simply overwrite the cells that represent the borders. This can be done by creating a mask image, and then using that as reference as to where to overwrite pixels in the original.
This can be done with:
mask_image = np.zeros(img.shape[0:2], np.uint8)
cv2.drawContours(mask_image, contours, -1, color=255, thickness=2)
border_points = np.array(np.where(mask_image == 255)).transpose()
background = [0, 0, 0] # Change this to the colour you want
for point in border_points:
    img[point[0], point[1]] = background
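Incidentally (my addition, not part of the original answer), the per-point loop can be replaced by a single vectorized boolean-mask assignment, which is much faster on large images:
img[mask_image == 255] = background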
Update:
You could use the 3-channel image you already created for the mask, but that slightly complicates the algorithm. The mask image I proposed is better suited to the task, but I will try to adapt it to your code:
# Create your mask image as usual...
border_points = np.array(np.where(img1[:,:,1] == 255)).transpose() # Only look at channel 2
background = [0, 0, 0] # Change this to the colour you want
for point in border_points:
    img[point[0], point[1]] = background
Update to do as #RobAu suggested (quicker than my previous methods):
line_thickness = 3 # Change this value until it looks the best.
cv2.drawContours(img, contours, -1, color=(0,0,0), thickness=line_thickness )
Please note I didn't test this code, so it might need some further fiddling.
As a reference to the comments on this question, this is an example of code that locates rectangles and creates a new image for each one. It was an attempt at creating individual images from a picture of shredded paper, and some of the values will need to be changed for it to locate rectangles of the right size.
There is also some code for tracking the sizes of the images. The code is about half written by me and half adapted from Stack Overflow answers.
import cv2
import numpy as np
fileName = ['9','8','7','6','5','4','3','2','1','0']
img = cv2.imread('#YOUR IMAGE#')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
kernel = np.ones((5,5),np.uint8)
erosion = cv2.erode(gray,kernel,iterations = 2)
kernel = np.ones((4,4),np.uint8)
dilation = cv2.dilate(erosion,kernel,iterations = 2)
edged = cv2.Canny(dilation, 30, 200)
_, contours, hierarchy = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
rects = [cv2.boundingRect(cnt) for cnt in contours]
rects = sorted(rects,key=lambda x:x[1],reverse=True)
i = -1
j = 1
y_old = 5000
x_old = 5000
for rect in rects:
    x, y, w, h = rect
    area = w * h
    print('width: %d and height: %d' % (w, h))
    if w > 50 and h > 500:
        print('abs:')
        print(abs(x_old - x))
        if abs(x_old - x) > 0:
            print('writing')
            x_old = x
            x, y, w, h = rect
            out = img[y+10:y+h-10, x+10:x+w-10]
            cv2.imwrite('assets/newImage' + fileName[i] + '.jpg', out)
            j += 1
        if (y_old - y) > 1000:
            i += 1
            y_old = y
Even though the given input image links are not working (so I obviously can't know whether the following is exactly what you asked for), I learnt something from your question when I was working on removing table structure lines from an image, and I would like to share it for future readers.
I followed the steps provided in the OpenCV documentation to remove the lines.
But that only removed the horizontal lines. When I tried to remove the vertical lines as well, the resulting image only had the vertical lines - the text in the table was gone.
Then I came across your question and saw final_img = cv2.subtract(img1, img) in it. I tried that, and it worked great.
Here are the steps that I followed:
# Load the image
import sys
import numpy as np
import cv2 as cv

def show_wait_destroy(winname, img):
    # helper from the OpenCV tutorial this code follows
    cv.imshow(winname, img)
    cv.waitKey(0)
    cv.destroyWindow(winname)

argv = sys.argv[1:]  # image path passed on the command line
src = cv.imread(argv[0], cv.IMREAD_COLOR)
# Check if image is loaded fine
if src is None:
    print('Error opening image: ' + argv[0])
    sys.exit(-1)
# Show source image
cv.imshow("src", src)
# [load_image]
# [gray]
# Transform source image to gray if it is not already
if len(src.shape) != 2:
    gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
else:
    gray = src
# Show gray image
# show_wait_destroy("gray", gray)
# [gray]
# [bin]
# Apply adaptiveThreshold at the bitwise_not of gray, notice the ~ symbol
gray = cv.bitwise_not(gray)
bw = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C,
                          cv.THRESH_BINARY, 15, -2)
# Show binary image
# show_wait_destroy("binary", bw)
# [bin]
# [init]
# Create the images that will use to extract the horizontal and vertical lines
horizontal = np.copy(bw)
vertical = np.copy(bw)
# [horiz]
# [vert]
# Specify size on vertical axis
rows = vertical.shape[0]
verticalsize = rows // 10  # integer division: getStructuringElement needs an int size
# Create structure element for extracting vertical lines through morphology operations
verticalStructure = cv.getStructuringElement(cv.MORPH_RECT, (1, verticalsize))
# Apply morphology operations
vertical = cv.erode(vertical, verticalStructure)
vertical = cv.dilate(vertical, verticalStructure)
# [init]
# [horiz]
# Specify size on horizontal axis
cols = horizontal.shape[1]
horizontal_size = cols // 30
# Create structure element for extracting horizontal lines through morphology operations
horizontalStructure = cv.getStructuringElement(cv.MORPH_RECT, (horizontal_size, 1))
# Apply morphology operations
horizontal = cv.erode(horizontal, horizontalStructure)
horizontal = cv.dilate(horizontal, horizontalStructure)
lines_removed = cv.subtract(gray, vertical + horizontal)
show_wait_destroy("lines_removed", ~lines_removed)
Input:
Output:
A few things that I changed from the sources:
verticalsize = rows // 10 - here, I do not understand the significance of the number 10. In the documentation, 30 was used. I got a better result with 10. I guess the smaller the divisor, the larger the structuring element, and since we are targeting straight lines here, reducing the number works.
In the documentation, vertical lines are processed after horizontal lines. I reversed the order.
I swapped the parameters to cv2.subtract(): I used cv2.subtract(img, img1).

image analysis (opencv or scikit image), deskewing of noisy scan

I have some old bank statements as scans and would like to use Google's Tesseract engine to extract the text. It works pretty well unless the image is slightly rotated. I thought of detecting the dashed lines in order to estimate the slope and then the angle of rotation. However, it is tricky to get the parameters right.
If I could get rid of the large line artefact, I might use the minimum rotated bounding box (cv2.minAreaRect) on the text characters.
Maybe another strategy is suited better? Any ideas?
An example image (deleted some characters for data protection):
EDIT: I have found a solution which seems to work. However, I am still wondering if there might be a faster solution (it takes about 1.5 seconds per image).
I use template matching from skimage with the following template:
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage.filters import threshold_mean
from skimage.feature import match_template

template = plt.imread('template_long.png')
template = rgb2gray(template)
template = template > threshold_mean(template)
for i in range(1):
    # read in image
    img = cv2.imread('conversion/umsatz_{}.png'.format(i))
    # convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.bitwise_not(gray)
    # threshold the image, setting all foreground pixels to
    # 255 and all background pixels to 0
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # edge detection
    #edges = cv2.Canny(thresh, 2, 100, apertureSize=3)
    # fill the holes from detected edges
    #kernel = np.ones((2,2), np.uint8)
    #dilate = cv2.dilate(thresh, kernel, iterations=1)
    result = match_template(thresh, template)
    mask = result < 0.5
    r = result.copy()
    r[mask] = 0
    r[~mask] = 1
    plt.imshow(r)
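For completeness, a minimal sketch of the cv2.minAreaRect idea mentioned above (my untested sketch, assuming the large line artefacts have already been removed from the binary image; note that the angle convention of minAreaRect changed in OpenCV 4.5+):
import cv2
import numpy as np

thresh = cv2.imread('text_only.png', cv2.IMREAD_GRAYSCALE)  # hypothetical input
coords = np.column_stack(np.where(thresh > 0)).astype(np.float32)  # all foreground pixels
angle = cv2.minAreaRect(coords)[-1]  # angle of the minimum rotated bounding box
if angle < -45:                      # map the [-90, 0) convention to a deskew angle
    angle = -(90 + angle)
else:
    angle = -angle
h, w = thresh.shape
M = cv2.getRotationMatrix2D((w // 2, h // 2), angle, 1.0)
deskewed = cv2.warpAffine(thresh, M, (w, h),
                          flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)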

Using inpaint function in OpenCV via Python to interpolate broken river-data in a watershed

Background
A raster file collected via LIDAR records the topography of a watershed. To properly model the watershed, the river must appear continuous, without any breaks or interruptions. The roads in the raster file appear like dams that interrupt the river, as seen in the picture below.
Specific Area Under Consideration in the Watershed
Objective
These river breaks are the main problem and I am trying but failing to remove them.
Approach
Via Python, I used various tools and prebuilt functions from the OpenCV library. The primary function I used in this approach is cv2.inpaint. This function takes in an image file and a mask file, and interpolates the original image wherever the mask-file pixels are nonzero.
The main step here is determining the mask file, which I did by detecting the corners at the breaks in the river. The mask file guides the inpaint function to fill in pixels according to the patterns in the surrounding pixels.
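For readers unfamiliar with the call, a minimal illustration of that signature (the file names are placeholders): the mask must be an 8-bit single-channel image, nonzero wherever pixels should be filled in.
import cv2
img = cv2.imread('river.png')                        # image with breaks
mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)  # nonzero = pixels to repair
repaired = cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)  # 3 = inpaint radius in pixels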
Problem
My issue is that this happens from all directions whereas I require it to only extrapolate pixel data from the river itself. The image below shows the flawed result: inpaint works but it considers data from outside the river too.
Inpainted Result
Here is my code if you are so kind as to help:
import scipy.io as sio
import numpy as np
from matplotlib import pyplot as plt
import cv2
matfile = sio.loadmat('box.mat') ## box.mat original raster file linked below
ztopo = matfile['box']
#Crop smaller section for analysis
ztopo2 = ztopo[200:500, 1400:1700]
## Step 1) Isolate river
river = ztopo2.copy()
river[ztopo2<217.5] = 0
#This will become problematic for entire watershed w/o proper slicing
## Step 2) Detect Corners
dst = cv2.cornerHarris(river,3,7,0.04)
# cornerHarris arguments adjust qualities of corner markers
# Dilate Image (unnecessary)
#dst = cv2.dilate(dst,None)
# Threshold for an optimal value, it may vary depending on the image.
# This adjusts what defines a corner
river2 = river.copy()
river2[dst>0.01*dst.max()]=[150]
## Step 3) Remove river and keep corners
# Loop to isolate the detected corners: zero out everything except corner markers
rows, columns = river2.shape
for i in np.arange(rows):
    for j in np.arange(columns):
        if river2[i, j] != 150:
            river2[i, j] = 0
# Save corners as new image for import during next step.
# Import must be via cv2 as thresholding and contour detection can only work on BGR files. Sio import in line 6 (matfile = sio.loadmat('box.mat')) imports 1 dimensional image rather than BGR.
cv2.imwrite('corners.png', river2)
## Step 4) Create mask image by defining and filling a contour around the previously detected corners
#Step 4 code retrieved from http://dsp.stackexchange.com/questions/2564/opencv-c-connect-nearby-contours-based-on-distance-between-them
#Article: OpenCV/C++ connect nearby contours based on distance between them
#Initiate function to specify features of contour connections
def find_if_close(cnt1, cnt2):
    row1, row2 = cnt1.shape[0], cnt2.shape[0]
    for i in range(row1):
        for j in range(row2):
            dist = np.linalg.norm(cnt1[i] - cnt2[j])
            if abs(dist) < 50:
                return True
            elif i == row1 - 1 and j == row2 - 1:
                return False
#import image of corners created in step 3 so thresholding can function properly
img = cv2.imread('corners.png')
#Thresholding and Finding contours only works on grayscale image
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
contours,hier = cv2.findContours(thresh,cv2.RETR_EXTERNAL,2)
LENGTH = len(contours)
status = np.zeros((LENGTH,1))
for i, cnt1 in enumerate(contours):
    x = i
    if i != LENGTH - 1:
        for j, cnt2 in enumerate(contours[i+1:]):
            x = x + 1
            dist = find_if_close(cnt1, cnt2)
            if dist == True:
                val = min(status[i], status[x])
                status[x] = status[i] = val
            else:
                if status[x] == status[i]:
                    status[x] = i + 1
unified = []
maximum = int(status.max())+1
for i in range(maximum):
    pos = np.where(status == i)[0]
    if pos.size != 0:
        cont = np.vstack([contours[i] for i in pos])
        hull = cv2.convexHull(cont)  # I don't know why/how this is used
        unified.append(hull)
cv2.drawContours(img,unified,-1,(0,255,0),1) #Last argument specifies contour width
cv2.drawContours(thresh,unified,-1,255,-1)
# Thresh is the filled contour while img is the contour itself
# The contour surrounds the corners
#cv2.imshow('pic', thresh) #Produces black and white image
## Step 5) Merge via inpaint
river = np.uint8(river)
ztopo2 = np.uint8(ztopo2)
thresh[thresh>0] = 1
#new = river.copy()
merged = cv2.inpaint(river,thresh,12,cv2.INPAINT_TELEA)
plt.imshow(merged)
plt.colorbar()
plt.show()
