First of all, thanks to Christoph Rackwitz, who guided me in writing parts of this code with Python and OpenCV.
I have a question! This is my code:
import cv2
import numpy as np
if __name__ == '__main__':
    # Read the destination image (the match frame).
    im_dst = cv2.imread('c:/36.jpg')
    # Four corners of the right penalty area in the destination image
    pts_dst = np.array([[314, 107], [693, 107], [903, 493], [311, 491]])
    # Read the source image (the pitch diagram).
    im_src = cv2.imread('c:/pitch.jpg')
    # Four corners of the right penalty area in the source image.
    pts_src = np.array([[487, 81], [575, 81], [575, 297], [487, 297]])
    # Calculate Homography
    h, status = cv2.findHomography(pts_src, pts_dst)
    # Warp source image to destination based on homography
    im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1], im_dst.shape[0]), borderValue=[255, 255, 255])
    mask = im_out[:, :, 0] < 100
    im_out_overlapped = im_dst.copy()
    im_out_overlapped[mask] = [0, 0, 255]
    # Display images
    cv2.imshow("Source Image", im_src)
    cv2.imshow("Destination Image", im_dst)
    cv2.imshow("Warped Source Image", im_out)
    cv2.imshow("Warped", im_out_overlapped)
    cv2.waitKey(0)
With this code I can import these two images:
and the result (after warpPerspective) is this:
Now I have a new problem. As you can see in the code, to compute the homography between my two images I have to supply four point coordinates (the four corners of the right penalty area) for each image, which means finding eight point coordinates in total.
Is there a way for my app to find these coordinate points AUTOMATICALLY in both images, so that I don't have to enter the coordinates myself?
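A loose sketch of one automatic approach, under the assumption that the pitch lines are the brightest low-saturation pixels in the frame: segment the white lines, detect line segments with HoughLinesP, and intersect them to get candidate corners. All thresholds and the file name below are guesses, and the detected corners still have to be filtered and matched to the known corners of the pitch model:

import cv2
import numpy as np

frame = cv2.imread('c:/36.jpg')
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

# Keep bright, low-saturation pixels (the white pitch lines); thresholds are guesses.
line_mask = cv2.inRange(hsv, np.array([0, 0, 180]), np.array([180, 60, 255]))

# Detect line segments on the mask; parameters need tuning per video.
lines = cv2.HoughLinesP(line_mask, 1, np.pi / 180, threshold=80,
                        minLineLength=60, maxLineGap=10)

def intersect(l1, l2):
    # Intersection of two segments treated as infinite lines, in homogeneous coordinates.
    x1, y1, x2, y2 = l1
    x3, y3, x4, y4 = l2
    a = np.cross([x1, y1, 1.0], [x2, y2, 1.0])
    b = np.cross([x3, y3, 1.0], [x4, y4, 1.0])
    p = np.cross(a, b)
    if abs(p[2]) < 1e-9:
        return None  # (nearly) parallel lines
    return (p[0] / p[2], p[1] / p[2])

corners = []
if lines is not None:
    segments = [l[0] for l in lines]
    for i in range(len(segments)):
        for j in range(i + 1, len(segments)):
            p = intersect(segments[i], segments[j])
            if p is not None:
                corners.append(p)
# 'corners' now holds candidate intersection points that still need filtering
# (keep only points near the penalty area) and matching to the pitch image.

In practice, a robust solution usually combines something like this with a model of the pitch markings and RANSAC to reject wrong correspondences.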
Related
I wrote this code in Python with OpenCV.
I have two images. The first (36.jpg) is a frame from a football match:
The second (pitch.png) is an image of the football pitch lines drawn in red, in PNG format, i.e. without a white background:
With this code, I selected four coordinate points in each of the two images (the four corners of the right penalty area),
and then, using cv2.warpPerspective and displaying the result, the first image can be shown from a top view,
as below:
My question is: what changes do I need to make in my code so that the red lines from the second image appear on the first image in the same way as in the images below (drawn in the Paint app):
This is my code:
import cv2
import numpy as np
if __name__ == '__main__':
    # Read source image.
    im_src = cv2.imread('c:/36.jpg')
    # Four corners of the right penalty area in the first image
    pts_src = np.array([[314, 108], [693, 108], [903, 493], [311, 490]])
    # Read destination image.
    im_dst = cv2.imread('c:/pitch.png')
    # Four corners of the right penalty area in the pitch image.
    pts_dst = np.array([[480, 76], [569, 76], [569, 292], [480, 292]])
    # Calculate Homography
    h, status = cv2.findHomography(pts_src, pts_dst)
    # Warp source image to destination based on homography
    im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1], im_dst.shape[0]))
    # Display images
    cv2.imshow("Source Image", im_src)
    cv2.imshow("Destination Image", im_dst)
    cv2.imshow("Warped Source Image", im_out)
    cv2.waitKey(0)
Swap your source and destination images and points. Then, warp the source image:
im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1],im_dst.shape[0]), borderValue=[255,255,255])
and add this code:
mask = im_out[:,:,0] < 100
im_out_overlapped = im_dst.copy()
im_out_overlapped[mask] = [0,0,255]
I am trying to detect bubbles on an OMR sheet which looks something like this:
My code for edge detection and contour display is referenced from here. However, before finding the actual contours, I am trying to detect the edges, but I am somehow not able to set the correct parameter values.
This is what I get:
Code:
from imutils.perspective import four_point_transform
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
def auto_canny(image, sigma=0.50):
    # compute the median of the single channel pixel intensities
    v = np.median(image)
    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(image, lower, upper)
    # return the edged image
    return edged
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="path to the input image")
args = vars(ap.parse_args())
image = cv2.imread(args["image"])
r = 500.0 / image.shape[1]
dim = (500, int(image.shape[0] * r))
# perform the actual resizing of the image and show it
image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
equalized_img = cv2.equalizeHist(gray)
cv2.imshow('Equalized', equalized_img)
# cv2.waitKey(0)
blurred = cv2.GaussianBlur(equalized_img, (7, 7), 0)
# edged =cv2.Canny(equalized_img, 30, 160)
edged = auto_canny(blurred)
cv2.imshow('edged', edged)
cv2.waitKey(0)
How can I get all the 90*4 circles?
You should use a Hough transform to search for circles. This method projects every white pixel as a circle and tries to get as many overlapping pixels as possible. You'll have to specify the predicted radii of the circles to be found within the image.
Left - original image
Top-right - each white pixel is projected as a red circle - they are too small to produce an intersection point
Bottom-right - the green circles are larger, and all the intersection points meet exactly at the middle of the circle! Both radius and position are returned by cvHoughCircles
This person dealt with blob detection (that's what finding circles is called, I think) using cvHoughCircles on a cvCanny-ized image (read the OP's update).
OpenCV: Error in cvHoughCircles usage
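For reference, a minimal sketch of that idea with OpenCV's Python API; the file name and every radius/threshold value below are assumptions that have to be tuned to the bubble size in the scan:

import cv2
import numpy as np

img = cv2.imread('omr.png')                     # hypothetical file name
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (7, 7), 0)

# HoughCircles votes for circle centres; minDist keeps neighbouring bubbles apart,
# param2 is the accumulator threshold (lower = more, possibly false, circles).
circles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, dp=1, minDist=15,
                           param1=100, param2=18, minRadius=6, maxRadius=12)

if circles is not None:
    for x, y, r in np.round(circles[0]).astype(int):
        cv2.circle(img, (int(x), int(y)), int(r), (0, 255, 0), 2)

cv2.imshow('detected circles', img)
cv2.waitKey(0)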
You need to improve your contour detection.
Possibly not by changing the detection itself, but by better pre-processing at the earlier stage.
Contour detection works better with more contrast and color separation in the image. If you haven't done so yet, threshold your image with techniques like simple thresholding, adaptive thresholding, or smarter methods such as Otsu's. Check the OpenCV documentation here.
Besides that, your case may eventually need more advanced techniques like "Adaptive Thresholding Using the Integral Image", described here.
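A minimal sketch of that pre-processing, assuming OpenCV 4.x and a hypothetical file name; the block size and constant C are guesses to tune:

import cv2

gray = cv2.imread('omr.png', cv2.IMREAD_GRAYSCALE)   # hypothetical file name
blurred = cv2.GaussianBlur(gray, (5, 5), 0)

# Otsu's method picks a global threshold automatically.
_, otsu = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# Adaptive thresholding copes with uneven illumination.
adaptive = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY_INV, 21, 5)

# Contour detection on the binarized image (OpenCV 4.x returns two values here).
contours, _ = cv2.findContours(otsu, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)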
In the program given below I am aligning two images using homography and reducing the opacity of the im_dst image in the im_out image (say opacity = 0.5), so that I can see both the im_src and im_dst images in im_out. But all I am getting is a blackened im_dst image in im_out!
import cv2
import numpy as np
im_src = cv2.imread('src.jpg')
pts_src = np.array([[141, 131], [480, 159], [493, 630],[64, 601]])
im_dst = cv2.imread('dst.jpg')
pts_dst = np.array([[318, 256],[534, 372],[316, 670],[73, 473]])
h, status = cv2.findHomography(pts_src, pts_dst)
img1 = np.array(im_dst, dtype=np.float64)
img2 = np.array(im_src, dtype=np.float64)
img1 /= 255.0
# pre-multiplication
a_channel = np.ones(img1.shape, dtype=np.float64) / 2.0
im_dst = img1*a_channel
im_src = img2*(1-a_channel)
im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1],im_dst.shape[0]))
cv2.imshow("Warped Image", im_out)
cv2.waitKey(0)
I am new to OpenCV, so I might be missing something simple. Thanks for the help!
Hey I've seen those points before!
What your code is doing is reducing the values of two images, im_dst and im_src, but then you're simply moving the faded image of im_src to a new position and displaying that. Instead, you should add the faded and warped image to the destination image and output that. The following would be a working modification of the end of your code:
alpha = 0.5
im_dst = img1 * alpha
im_src = img2 * (1-alpha)
im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1],im_dst.shape[0]))
im_blended = im_dst + im_out
cv2.imshow("Blended Warped Image", im_blended)
cv2.waitKey(0)
However, you only divided img1 and not img2 by 255, so you would want to divide both first.
However, there is no reason to do this manually as you have to worry about converting the image types and scaling and all that. Instead, a much easier way is to use the built-in OpenCV function addWeighted() to add two images together with alpha-blending. So your entire code would instead be this short:
import cv2
import numpy as np
im_src = cv2.imread('src.jpg')
pts_src = np.array([[141, 131], [480, 159], [493, 630],[64, 601]])
im_dst = cv2.imread('dst.jpg')
pts_dst = np.array([[318, 256],[534, 372],[316, 670],[73, 473]])
h, status = cv2.findHomography(pts_src, pts_dst)
im_out = cv2.warpPerspective(im_src, h, (im_dst.shape[1],im_dst.shape[0]))
alpha = 0.5
beta = (1.0 - alpha)
dst_warp_blended = cv2.addWeighted(im_dst, alpha, im_out, beta, 0.0)
cv2.imshow('Blended destination and warped image', dst_warp_blended)
cv2.waitKey(0)
The function addWeighted() multiplies the first image im_dst by alpha and the second image im_out by beta. The last argument is a positive shift you can add to the result should you need it, i.e. dst = saturate(alpha*im_dst + beta*im_out + gamma). Finally, the result is saturated, so values above what is allowable for the datatype are truncated at the maximum. This way, your result is the same type as your inputs, so you don't have to convert to float.
Last point about your code. A lot of tutorials, the one linked above included, use findHomography() to get a homography from four matching points. It is more appropriate to use getPerspectiveTransform() in this case. The function findHomography() finds an optimal homography based on many matching points, using an outlier rejection scheme and random sampling to speed up going through all the possible sets of four matching points. It works fine for sets of four points of course, but it makes more sense to use getPerspectiveTransform() when you have four matching points, and findHomography() when you have more than four. Although, annoyingly, the points you pass into getPerspectiveTransform() have to be of type np.float32 for whatever reason. So this would be my final suggestion for your code:
import cv2
import numpy as np
# Read source image.
im_src = cv2.imread('src.jpg')
# Four corners of the book in source image
pts_src = np.array([[141, 131], [480, 159], [493, 630],[64, 601]], dtype=np.float32)
# Read destination image.
im_dst = cv2.imread('dst.jpg')
# Four corners of the book in destination image.
pts_dst = np.array([[318, 256],[534, 372],[316, 670],[73, 473]], dtype=np.float32)
# Calculate Homography
h = cv2.getPerspectiveTransform(pts_src, pts_dst)
# Warp source image to destination based on homography
warp_src = cv2.warpPerspective(im_src, h, (im_dst.shape[1],im_dst.shape[0]))
# Blend the warped image and the destination image
alpha = 0.5
beta = (1.0 - alpha)
dst_warp_blended = cv2.addWeighted(im_dst, alpha, warp_src, beta, 0.0)
# Show the output
cv2.imshow('Blended destination and warped image', dst_warp_blended)
cv2.waitKey(0)
This (and all the other solutions above) will produce the following image:
I'm working on a depth map with OpenCV. I can obtain it, but it is reconstructed from the left camera origin, and the latter is slightly tilted; as you can see in the figure, the depth is "shifted" (the depth should be close and show no horizontal gradient):
I would like to express it with a zero angle. I tried the warpPerspective function, as you can see below, but I obtain a null field...
P = np.dot(cam,np.dot(Transl,np.dot(Rot,A1)))
dst = cv2.warpPerspective(depth, P, (2048, 2048))
with:
#Projection 2D -> 3D matrix
A1 = np.zeros((4,3))
A1[0,0] = 1
A1[0,2] = -1024
A1[1,1] = 1
A1[1,2] = -1024
A1[3,2] = 1
#Rotation matrix around the Y axis
theta = np.deg2rad(5)
Rot = np.zeros((4,4))
Rot[0,0] = np.cos(theta)
Rot[0,2] = -np.sin(theta)
Rot[1,1] = 1
Rot[2,0] = np.sin(theta)
Rot[2,2] = np.cos(theta)
Rot[3,3] = 1
#Translation matrix on the X axis
dist = 0
Transl = np.zeros((4,4))
Transl[0,0] = 1
Transl[0,2] = dist
Transl[1,1] = 1
Transl[2,2] = 1
Transl[3,3] = 1
#Camera intrinsics matrix 3D -> 2D
cam = np.concatenate((C1,np.zeros((3,1))),axis=1)
cam[2,2] = 1
P = np.dot(cam,np.dot(Transl,np.dot(Rot,A1)))
dst = cv2.warpPerspective(Z0_0, P, (2048*3, 2048*3))
LATER EDIT:
You can download the 32MB field dataset here: https://filex.ec-lille.fr/get?k=cCBoyoV4tbmkzSV5bi6. Then, load and view the image with:
from matplotlib import pyplot as plt
import numpy as np
img = np.load('testZ0.npy')
plt.imshow(img)
plt.show()
I have got a rough solution in place. You can modify it later.
I used the mouse handling operations available in OpenCV to crop the region of interest in the given heatmap.
(Did I just say I used a mouse to crop the region?) Yes, I did. To learn more about mouse functions in OpenCV SEE THIS. Besides, there are many other SO questions that can help you in this regard.:)
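A minimal sketch of what that mouse-based corner selection might look like; the window name, file name, and the four-click flow are assumptions, not the exact code used here:

import cv2

points = []

def on_mouse(event, x, y, flags, param):
    # Record one corner per left click.
    if event == cv2.EVENT_LBUTTONDOWN:
        points.append((x, y))
        print(points)

img = cv2.imread('heatmap.png')            # hypothetical file name
cv2.namedWindow('select corners')
cv2.setMouseCallback('select corners', on_mouse)

# Click the four corners of the region of interest; press Esc to stop early.
while len(points) < 4:
    cv2.imshow('select corners', img)
    if cv2.waitKey(20) & 0xFF == 27:
        break
cv2.destroyAllWindows()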
Using those functions I was able to obtain the following:
Now to your question of removing the tilt: I used the homography principle, taking the corner points of the image above and mapping them onto a 'white' image of a definite size. I used the cv2.findHomography() function for this.
Now using the cv2.warpPerspective() function in OpenCV, I was able to obtain the following:
Now you can apply the required scale to this image as you wanted.
CODE:
I have also attached some snippets of code for your perusal:
#First I created an image of white color of a definite size
back = np.ones((435, 379, 3)) # size
back[:] = (255, 255, 255) # white color
Next I obtained the corner points pts_src on the tilted image below:
pts_src = np.array([[25.0, 2.0],[403.0,22.0],[375.0,436.0],[6.0,433.0]])
I wanted the points above to be mapped to the points 'pts_dst' given below:
pts_dst = np.array([[2.0, 2.0], [379.0, 2.0], [379.0, 435.0],[2.0, 435.0]])
Now I used the principle of homography:
h, status = cv2.findHomography(pts_src, pts_dst)
Finally I mapped the original image to the white image using perspective transform.
fin = cv2.warpPerspective(img, h, (back.shape[1],back.shape[0]))
# img -> original tilted image.
# back -> image of white color.
Hope this helps! I also got to learn a great deal from this question.
Note: the points fed to cv2.findHomography() must be floats.
For more info on Homography , visit THIS PAGE
Background
A raster file collected via LIDAR records the topography of a watershed. To properly model the watershed, the river must appear continuous, without any breaks or interruptions. The roads in the raster file appear like dams that interrupt the river, as seen in the picture below.
Specific Area Under Consideration in the Watershed
Objective
These river breaks are the main problem and I am trying but failing to remove them.
Approach
Via Python, I used the various tools and prebuilt functions in the OpenCV library. The primary function I used in this approach is cv2.inpaint. This function takes in an image and a mask and interpolates the original image wherever the mask pixels are nonzero.
The main step here is determining the mask file which I did by detecting the corners at the break in the river. The mask file will guide the inpaint function to fill in the pixels according to the patterns in the surrounding pixels.
Problem
My issue is that this happens from all directions, whereas I need it to extrapolate pixel data only from the river itself. The image below shows the flawed result: inpaint works, but it considers data from outside the river too.
Inpainted Result
Here is my code if you are so kind as to help:
import scipy.io as sio
import numpy as np
from matplotlib import pyplot as plt
import cv2
matfile = sio.loadmat('box.mat') ## box.mat original raster file linked below
ztopo = matfile['box']
#Crop smaller section for analysis
ztopo2 = ztopo[200:500, 1400:1700]
## Step 1) Isolate river
river = ztopo2.copy()
river[ztopo2<217.5] = 0
#This will become problematic for entire watershed w/o proper slicing
## Step 2) Detect Corners
dst = cv2.cornerHarris(river,3,7,0.04)
# cornerHarris arguments adjust qualities of corner markers
# Dilate Image (unnecessary)
#dst = cv2.dilate(dst,None)
# Threshold for an optimal value, it may vary depending on the image.
# This adjusts what defines a corner
river2 = river.copy()
river2[dst>0.01*dst.max()]=[150]
## Step 3) Remove river and keep corners
#Initiate loop to isolate detected corners
i=0
j=0
rows,columns = river2.shape
for i in np.arange(rows):
    for j in np.arange(columns):
        if river2[i,j] != 150:
            river2[i,j] = 0
        j = j + 1
    i = i + 1
# Save corners as new image for import during next step.
# Import must be via cv2 as thresholding and contour detection can only work on BGR files. Sio import in line 6 (matfile = sio.loadmat('box.mat')) imports 1 dimensional image rather than BGR.
cv2.imwrite('corners.png', river2)
## Step 4) Create mask image by defining and filling a contour around the previously detected corners
#Step 4 code retrieved from http://dsp.stackexchange.com/questions/2564/opencv-c-connect-nearby-contours-based-on-distance-between-them
#Article: OpenCV/C++ connect nearby contours based on distance between them
#Initiate function to specify features of contour connections
def find_if_close(cnt1, cnt2):
    row1, row2 = cnt1.shape[0], cnt2.shape[0]
    for i in range(row1):
        for j in range(row2):
            dist = np.linalg.norm(cnt1[i] - cnt2[j])
            if abs(dist) < 50:
                return True
            elif i == row1-1 and j == row2-1:
                return False
#import image of corners created in step 3 so thresholding can function properly
img = cv2.imread('corners.png')
#Thresholding and Finding contours only works on grayscale image
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
contours,hier = cv2.findContours(thresh,cv2.RETR_EXTERNAL,2)
LENGTH = len(contours)
status = np.zeros((LENGTH,1))
for i, cnt1 in enumerate(contours):
    x = i
    if i != LENGTH-1:
        for j, cnt2 in enumerate(contours[i+1:]):
            x = x+1
            dist = find_if_close(cnt1, cnt2)
            if dist == True:
                val = min(status[i], status[x])
                status[x] = status[i] = val
            else:
                if status[x] == status[i]:
                    status[x] = i+1
unified = []
maximum = int(status.max())+1
for i in range(maximum):
    pos = np.where(status == i)[0]
    if pos.size != 0:
        cont = np.vstack([contours[i] for i in pos])
        hull = cv2.convexHull(cont) # I don't know why/how this is used
        unified.append(hull)
cv2.drawContours(img,unified,-1,(0,255,0),1) #Last argument specifies contour width
cv2.drawContours(thresh,unified,-1,255,-1)
# Thresh is the filled contour while img is the contour itself
# The contour surrounds the corners
#cv2.imshow('pic', thresh) #Produces black and white image
## Step 5) Merge via inpaint
river = np.uint8(river)
ztopo2 = np.uint8(ztopo2)
thresh[thresh>0] = 1
#new = river.copy()
merged = cv2.inpaint(river,thresh,12,cv2.INPAINT_TELEA)
plt.imshow(merged)
plt.colorbar()
plt.show()