Drawing Gradient Directions - Python

I have created some code to visually display the average gradient direction in a cell/kernel. My questions are:
Is my method of calculating the average gradient direction correct? I am aware of a different method (see * below) but am unsure which is better/more accurate.
Is my normalisation of a degrees value to a hue value correct? I.e., normalising a value that can be 0-359 to a value between 0-179 by simply dividing by 2?
Most importantly, am I accurately calculating and representing the average gradient direction over a series of cells?
*Alt method to calculate the average gradient direction:
hMean = cv2.mean(sobelX)
vMean = cv2.mean(sobelY)
avg_dir = math.atan2(-vMean[0], hMean[0])
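Per cell, that same component-averaging idea would look something like this (a sketch of mine with made-up names; the sign of vMean follows the snippet above):
import cv2
import math

def cell_mean_direction(cell_dx, cell_dy):
    # Average the Sobel components over the cell first, then take the angle.
    # This treats direction as a vector quantity, so 359 deg and 1 deg average
    # to ~0 deg rather than to 180 deg as a plain arithmetic mean of angles would.
    hMean = cv2.mean(cell_dx)[0]
    vMean = cv2.mean(cell_dy)[0]
    return math.atan2(-vMean, hMean)  # radians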
Draw gradient directions:
import cv2
import math
import numpy as np

np.set_printoptions(precision=3, threshold=np.inf, linewidth=np.inf, suppress=True)

def get_roi(src, pt1, pt2):
    col1, col2 = (pt1[0], pt2[0]) if pt1[0] < pt2[0] else (pt2[0], pt1[0])
    row1, row2 = (pt1[1], pt2[1]) if pt1[1] < pt2[1] else (pt2[1], pt1[1])
    return src[row1:row2, col1:col2]

def get_gradient_direction_line(avg_dir, cellUpperLeft, cellW, cellH, scale=0.8):
    halfScale = scale / 2
    centrePt = (int(cellUpperLeft[0] + (cellW/2)), int(cellUpperLeft[1] + (cellH/2)))
    strtPt = (int(centrePt[0] - (cellW * halfScale * math.cos(avg_dir))), int(centrePt[1] - (cellH * halfScale * math.sin(avg_dir))))
    endPt = (int(centrePt[0] + (cellW * halfScale * math.cos(avg_dir))), int(centrePt[1] + (cellH * halfScale * math.sin(avg_dir))))
    return [strtPt, endPt]
def get_gradient_directions_arrows(direction, kernel_w=3, kernel_h=3):
    arrows = np.zeros(direction.shape, dtype=np.uint8)
    n_cols = int(direction.shape[1] / kernel_w)
    n_rows = int(direction.shape[0] / kernel_h)
    for c in range(n_cols):
        # Draw grid lines
        cv2.line(arrows, (c*kernel_w, 0), (c*kernel_w, direction.shape[0]), (255,255,255), 1)
        cv2.line(arrows, (0, c*kernel_h), (direction.shape[1], c*kernel_h), (255,255,255), 1)
        for r in range(n_rows):
            roiUpperleft = (c*kernel_w, r*kernel_h)
            roi = get_roi(direction, roiUpperleft, ((c+1)*kernel_w, (r+1)*kernel_h))
            avg_dir = cv2.mean(roi)[0]
            arrow_pnts = get_gradient_direction_line(avg_dir, roiUpperleft, kernel_w, kernel_h)
            cv2.arrowedLine(arrows, arrow_pnts[0], arrow_pnts[1], (255,255,255), 1)
    return arrows
def get_gradient_directions_colours(direction, kernel_w=3, kernel_h=3):
    # (red=0°, yellow=60°, green=120°, blue=240°, ...)
    hsv = np.zeros((direction.shape[0], direction.shape[1], 3), dtype=np.uint8)
    hsv = cv2.cvtColor(hsv, cv2.COLOR_BGR2HSV)
    n_cols = int(direction.shape[1] / kernel_w)
    n_rows = int(direction.shape[0] / kernel_h)
    for c in range(n_cols):
        # Draw grid lines
        cv2.line(hsv, (c*kernel_w, 0), (c*kernel_w, direction.shape[0]), (180,255,255), 1)
        cv2.line(hsv, (0, c*kernel_h), (direction.shape[1], c*kernel_h), (180,255,255), 1)
        for r in range(n_rows):
            roiUpperleft = (c*kernel_w, r*kernel_h)
            roi = get_roi(direction, roiUpperleft, ((c+1)*kernel_w, (r+1)*kernel_h))
            avg_dir = cv2.mean(roi)[0]
            # avg_dir will be a value between 0-359. HSV hue needs a value between 0-179
            avg_dir /= 2
            arrow_pnts = get_gradient_direction_line(avg_dir, roiUpperleft, kernel_w, kernel_h)
            cv2.rectangle(hsv, roiUpperleft, ((c+1)*kernel_w, (r+1)*kernel_h), (avg_dir, 255, 255), -1)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return bgr
def main():
    src = cv2.imread('foo.jpg', 0)
    # Calculate gradient magnitude and direction for the image
    dX = cv2.Sobel(src, cv2.CV_32F, 1, 0)
    dY = cv2.Sobel(src, cv2.CV_32F, 0, 1)
    mag, direction = cv2.cartToPolar(dX, dY, angleInDegrees=True)
    arrows = get_gradient_directions_arrows(direction, 20, 20)
    colours = get_gradient_directions_colours(direction, 20, 20)
    cv2.imshow('src', src)
    cv2.imshow('arrows', arrows)
    cv2.imshow('colours', colours)
    cv2.waitKey(0)

main()
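On the hue question: OpenCV's 8-bit HSV hue does run 0-179, so halving a 0-359 degree value is the usual mapping. A quick check (my own snippet, not part of the question):
import numpy as np
import cv2

for deg in (0, 120, 240, 358):
    px = np.uint8([[[deg // 2, 255, 255]]])  # a single (hue, saturation, value) pixel
    print(deg, '->', cv2.cvtColor(px, cv2.COLOR_HSV2BGR)[0, 0])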

Related

Difficulty calculating slope for a set of rotated and shifted ellipses, sometimes inverted, sometimes completely wrong

I am using OpenCV-Python to fit an ellipse to the shape of a droplet.
Then I choose a line, which represents the surface the droplet is resting on.
I calculate the tangents at the intersection of the surface and the ellipse to get the contact angle of the droplet.
It works most of the time, but in some cases, the tangents are flipped upside down or just wrong.
It seems that the calculation for the slope of the tangent fails.
Can someone tell me why this happens?
Here you can see how it should look (surface at y=250):
And this is the result when I choose a surface level of y=47:
I did some research, and it seems I need to detect which of the two axes maj_ax, min_ax was parallel to the x-axis before the ellipse was rotated by phi, or else the slope calculation fails.
What am I doing wrong?
Here is a minimal reproducible example:
from math import cos, sin, pi, sqrt, tan, atan2, radians
import cv2

class Droplet():
    def __init__(self):
        self.is_valid = False
        self.angle_l = 0
        self.angle_r = 0
        self.center = (0,0)
        self.maj = 0
        self.min = 0
        self.phi = 0.0
        self.tilt_deg = 0
        self.foc_pt1 = (0,0)
        self.foc_pt2 = (0,0)
        self.tan_l_m = 0
        self.int_l = (0,0)
        self.line_l = (0,0,0,0)
        self.tan_r_m = 0
        self.int_r = (0,0)
        self.line_r = (0,0,0,0)
        self.base_diam = 0
def evaluate_droplet(img, y_base) -> Droplet:
    drplt = Droplet()
    crop_img = img[:y_base,:]
    shape = img.shape
    height = shape[0]
    width = shape[1]
    # values only for 8bit images!
    bw_edges = cv2.Canny(crop_img, 76, 179)
    contours, hierarchy = cv2.findContours(bw_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if len(contours) == 0:
        raise ValueError('No contours found!')
    edge = max(contours, key=cv2.contourArea)
    (x0,y0),(maj_ax,min_ax),phi_deg = cv2.fitEllipse(edge)
    phi = radians(phi_deg) # to radians
    a = maj_ax/2
    b = min_ax/2
    intersection = calc_intersection_line_ellipse((x0,y0,a,b,phi),(0,y_base))
    if intersection is None:
        raise ValueError('No intersections found')
    # select left and right intersection points
    x_int_l = min(intersection)
    x_int_r = max(intersection)
    foc_len = sqrt(abs(a**2 - b**2))
    # calc slope and angle of tangent
    m_t_l = calc_slope_of_ellipse((x0,y0,a,b,phi), x_int_l, y_base)
    angle_l = pi - atan2(m_t_l,1)
    m_t_r = calc_slope_of_ellipse((x0,y0,a,b,phi), x_int_r, y_base)
    angle_r = atan2(m_t_r,1) + pi
    drplt.angle_l = angle_l
    drplt.angle_r = angle_r
    drplt.maj = maj_ax
    drplt.min = min_ax
    drplt.center = (x0, y0)
    drplt.phi = phi
    drplt.tilt_deg = phi_deg
    drplt.tan_l_m = m_t_l
    drplt.tan_r_m = m_t_r
    drplt.line_l = (int(round(x_int_l - (int(round(y_base))/m_t_l))), 0, int(round(x_int_l + ((height - int(round(y_base)))/m_t_l))), int(round(height)))
    drplt.line_r = (int(round(x_int_r - (int(round(y_base))/m_t_r))), 0, int(round(x_int_r + ((height - int(round(y_base)))/m_t_r))), int(round(height)))
    drplt.int_l = (x_int_l, y_base)
    drplt.int_r = (x_int_r, y_base)
    drplt.foc_pt1 = (x0 + foc_len*cos(phi), y0 + foc_len*sin(phi))
    drplt.foc_pt2 = (x0 - foc_len*cos(phi), y0 - foc_len*sin(phi))
    drplt.base_diam = x_int_r - x_int_l
    drplt.is_valid = True
    # draw ellipse and lines
    img = cv2.drawContours(img,contours,-1,(100,100,255),2)
    img = cv2.drawContours(img,edge,-1,(255,0,0),2)
    img = cv2.ellipse(img, (int(round(x0)),int(round(y0))), (int(round(a)),int(round(b))), int(round(phi*180/pi)), 0, 360, (255,0,255), thickness=1, lineType=cv2.LINE_AA)
    y_int = int(round(y_base))
    img = cv2.line(img, (int(round(x_int_l - (y_int/m_t_l))), 0), (int(round(x_int_l + ((height - y_int)/m_t_l))), int(round(height))), (255,0,255), thickness=1, lineType=cv2.LINE_AA)
    img = cv2.line(img, (int(round(x_int_r - (y_int/m_t_r))), 0), (int(round(x_int_r + ((height - y_int)/m_t_r))), int(round(height))), (255,0,255), thickness=1, lineType=cv2.LINE_AA)
    img = cv2.ellipse(img, (int(round(x_int_l)),y_int), (20,20), 0, 0, -int(round(angle_l*180/pi)), (255,0,255), thickness=1, lineType=cv2.LINE_AA)
    img = cv2.ellipse(img, (int(round(x_int_r)),y_int), (20,20), 0, 180, 180 + int(round(angle_r*180/pi)), (255,0,255), thickness=1, lineType=cv2.LINE_AA)
    img = cv2.line(img, (0,y_int), (width, y_int), (255,0,0), thickness=2, lineType=cv2.LINE_AA)
    img = cv2.putText(img, '<' + str(round(angle_l*180/pi,1)), (5,y_int-5), cv2.FONT_HERSHEY_COMPLEX, .5, (0,0,0))
    img = cv2.putText(img, '<' + str(round(angle_r*180/pi,1)), (width - 80,y_int-5), cv2.FONT_HERSHEY_COMPLEX, .5, (0,0,0))
    cv2.imshow('Test',img)
    cv2.waitKey(0)
    return drplt
def calc_intersection_line_ellipse(ellipse_pars, line_pars):
    """
    Calculates intersection(s) of an ellipse with a line.
    :param ellipse_pars: tuple of (x0,y0,a,b,phi): x0,y0 center of ellipse; a,b semi-axes of ellipse; phi tilt rel to x axis
    :param line_pars: tuple of (m,t): m is the slope and t is the intercept of the intersecting line
    :returns: x-coordinate(s) of intersection as tuple or float, or None if none found
    """
    ## -->> http://quickcalcbasic.com/ellipse%20line%20intersection.pdf
    (x0, y0, h, v, phi) = ellipse_pars
    (m, t) = line_pars
    y = t - y0
    try:
        a = v**2 * cos(phi)**2 + h**2 * sin(phi)**2
        b = 2*y*cos(phi)*sin(phi) * (v**2 - h**2)
        c = y**2 * (v**2 * sin(phi)**2 + h**2 * cos(phi)**2) - (h**2 * v**2)
        det = b**2 - 4*a*c
        if det > 0:
            x1 = int(round((-b - sqrt(det))/(2*a) + x0))
            x2 = int(round((-b + sqrt(det))/(2*a) + x0))
            return x1, x2
        elif det == 0:
            x = int(round(-b / (2*a)))
            return x
        else:
            return None
    except Exception as ex:
        raise ex
def calc_slope_of_ellipse(ellipse_pars, x, y):
    """
    Calculates the slope of the tangent at point (x,y); the point needs to be on the ellipse!
    :param ellipse_pars: tuple of (x0,y0,a,b,phi): x0,y0 center of ellipse; a,b semi-axes of ellipse; phi tilt rel to x axis
    :param x: x-coord where the slope will be calculated
    :param y: y-coord where the slope will be calculated
    :returns: the slope of the tangent
    """
    (x0, y0, a, b, phi) = ellipse_pars
    # transform to non-rotated ellipse
    x_rot = (x - x0)*cos(phi) + (y - y0)*sin(phi)
    y_rot = (x - x0)*sin(phi) + (y - y0)*cos(phi)
    m_rot = -(b**2 * x_rot)/(a**2 * y_rot) # slope of tangent to unrotated ellipse
    # rotate tangent line back to angle of the rotated ellipse
    m_tan = tan(atan2(m_rot,1) + phi)
    return m_tan
if __name__ == "__main__":
    im = cv2.imread('untitled1.png')
    # any value below 250 is just the droplet without the substrate
    drp = evaluate_droplet(im, 250)
Original image:
I made a mistake in calc_slope_of_ellipse:
x_rot = (x - x0)*cos(phi) + (y - y0)*sin(phi)
should be
x_rot = (x - x0)*cos(phi) - (y - y0)*sin(phi)
This fixes the wrong sign of the slope at y=47.
I replaced the atan2:
m_rot = -(b**2 * x_rot)/(a**2 * y_rot) # slope of tangent to unrotated ellipse
#rotate tangent line back to angle of the rotated ellipse
m_tan = tan(atan2(m_rot,1) + phi)
with
tan_a = x_rot/a**2
tan_b = y_rot/b**2
#rotate tangent line back to angle of the rotated ellipse
tan_a_r = tan_a*cos(phi) + tan_b*sin(phi)
tan_b_r = tan_b*cos(phi) - tan_a*sin(phi)
m_tan = - (tan_a_r / tan_b_r)
This fixes the weird behaviour for certain cases (y=62).
Complete function:
def calc_slope_of_ellipse(ellipse_pars, x, y):
    (x0, y0, a, b, phi) = ellipse_pars
    # transform to non-rotated ellipse centred at the origin
    x_rot = (x - x0)*cos(phi) - (y - y0)*sin(phi)
    y_rot = (x - x0)*sin(phi) + (y - y0)*cos(phi)
    # tangent line in implicit form: Ax + By = C
    tan_a = x_rot/a**2
    tan_b = y_rot/b**2
    # rotate tangent line back to angle of the rotated ellipse
    tan_a_r = tan_a*cos(phi) + tan_b*sin(phi)
    tan_b_r = tan_b*cos(phi) - tan_a*sin(phi)
    m_tan = - (tan_a_r / tan_b_r)
    return m_tan
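As a quick sanity check (my addition, not part of the original answer), the analytic slope can be compared against a finite-difference slope along the parametric ellipse, using the same rotation convention as the fixed function:
from math import cos, sin, radians

def ellipse_point(x0, y0, a, b, phi, t):
    # parametric point on the ellipse, rotation convention as in calc_slope_of_ellipse
    x = x0 + a*cos(t)*cos(phi) + b*sin(t)*sin(phi)
    y = y0 - a*cos(t)*sin(phi) + b*sin(t)*cos(phi)
    return x, y

x0, y0, a, b, phi = 100.0, 80.0, 50.0, 30.0, radians(20)
t = 0.7
xa, ya = ellipse_point(x0, y0, a, b, phi, t)
xb, yb = ellipse_point(x0, y0, a, b, phi, t + 1e-6)
m_numeric = (yb - ya) / (xb - xa)
m_analytic = calc_slope_of_ellipse((x0, y0, a, b, phi), xa, ya)
print(m_numeric, m_analytic)  # the two should agree to several decimals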

How can I block wrong lines in Hough space

I found a Hough line implementation on GitHub and tried the code on my computer.
When I plot the Hough space with matplotlib, there is a build-up, as shown in the picture.
This causes wrong line detections in the image.
import math
import numpy as np

def hough_line(img, angle_step=1, lines_are_white=True, value_threshold=5):
    # Rho and Theta ranges
    thetas = np.deg2rad(np.arange(-90.0, 90.0, angle_step))
    width, height = img.shape
    diag_len = int(round(math.sqrt(width * width + height * height)))
    rhos = np.linspace(-diag_len, diag_len, diag_len * 2)
    # Cache some reusable values
    cos_t = np.cos(thetas)
    sin_t = np.sin(thetas)
    num_thetas = len(thetas)
    # Hough accumulator array of theta vs rho
    # (note: uint8 vote counts wrap around once they pass 255)
    accumulator = np.zeros((2 * diag_len, num_thetas), dtype=np.uint8)
    # (row, col) indexes to edges
    are_edges = img > value_threshold if lines_are_white else img < value_threshold
    y_idxs, x_idxs = np.nonzero(are_edges)
    # Vote in the hough accumulator
    for i in range(len(x_idxs)):
        x = x_idxs[i]
        y = y_idxs[i]
        for t_idx in range(num_thetas):
            # Calculate rho. diag_len is added for a positive index
            rho = diag_len + int(round(x * cos_t[t_idx] + y * sin_t[t_idx]))
            accumulator[rho, t_idx] += 1
    return accumulator, thetas, rhos
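As an aside (a sketch of mine, not from the linked implementation), the nested voting loops can be vectorised with NumPy, and a wider accumulator dtype avoids uint8 votes wrapping past 255:
import math
import numpy as np

def hough_line_vec(img, angle_step=1, value_threshold=5):
    thetas = np.deg2rad(np.arange(-90.0, 90.0, angle_step))
    h, w = img.shape
    diag_len = int(round(math.sqrt(w * w + h * h)))
    # +1 row guards the extreme rho; uint64 will not realistically overflow
    accumulator = np.zeros((2 * diag_len + 1, len(thetas)), dtype=np.uint64)
    y_idxs, x_idxs = np.nonzero(img > value_threshold)
    # rho for every (edge pixel, theta) pair at once, shape (n_pixels, n_thetas)
    rho = diag_len + np.round(np.outer(x_idxs, np.cos(thetas)) + np.outer(y_idxs, np.sin(thetas))).astype(int)
    t_idx = np.broadcast_to(np.arange(len(thetas)), rho.shape)
    np.add.at(accumulator, (rho.ravel(), t_idx.ravel()), 1)
    rhos = np.linspace(-diag_len, diag_len, diag_len * 2)
    return accumulator, thetas, rhos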
def show_hough_line(img, accumulator, thetas, rhos, save_path=None):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(1, 2, figsize=(10, 10))
    ax[0].imshow(img, cmap=plt.cm.gray)
    ax[0].set_title('Input image')
    ax[0].axis('image')
    ax[1].imshow(
        accumulator, cmap='jet',
        extent=[np.rad2deg(thetas[-1]), np.rad2deg(thetas[0]), rhos[-1], rhos[0]])
    ax[1].set_aspect('equal', adjustable='box')
    ax[1].set_title('Hough transform')
    ax[1].set_xlabel('Angles (degrees)')
    ax[1].set_ylabel('Distance (pixels)')
    ax[1].axis('image')
    # plt.axis('off')
    if save_path is not None:
        plt.savefig(save_path, bbox_inches='tight')
    plt.show()
Then I run this code:
img = cv2.imread('05102009081.png')
img_dark = cv2.imread('05102009081-1.png')
img1 = cv2.bitwise_and(img, img_dark)
gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
edges = cv2.Canny(blur, 100, 200)
accumulator, thetas, rhos = hough_line(edges)
#Thresholding with 100
a = (accumulator > 100).astype(int)
accumulator = accumulator * a
show_hough_line(edges, accumulator, thetas, rhos)
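To see which image lines those accumulator peaks correspond to, one option (my sketch, continuing from the variables above) is to convert each thresholded (rho, theta) cell back to a line and draw it:
peak_rows, peak_cols = np.nonzero(accumulator > 100)
vis = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
for rho_idx, t_idx in zip(peak_rows, peak_cols):
    rho, theta = rhos[rho_idx], thetas[t_idx]
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a * rho, b * rho  # closest point of the line to the origin
    pt1 = (int(x0 - 2000 * b), int(y0 + 2000 * a))  # extend well past the image borders
    pt2 = (int(x0 + 2000 * b), int(y0 - 2000 * a))
    cv2.line(vis, pt1, pt2, (0, 0, 255), 1)
cv2.imshow('peaks as lines', vis)
cv2.waitKey(0)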
This is the result without thresholding:
This is the result after thresholding:
As you can see, when I apply thresholding there are some peak points at approximately 55 degrees and between 10-50 pixels in the Hough space, and these cause wrong lines in the image.
What is the problem?
How can I solve it?
Thanks in advance.

Multiple occurrences of bounding boxes around a same location

I implemented the following code to match nodes in a plant, using a small cropped image as the template.
img_rgb = cv2.imread('Exp.2 - Florestópolis, 35DAE, 2017-2018, T4, R2, P4.jpg')
img_rgb = cv2.medianBlur(img_rgb, 7)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template_image = cv2.imread('TemplateNode.jpg',0)
template_image = cv2.medianBlur(template_image, 5)
width, height = template_image.shape[::-1]
res = cv2.matchTemplate(img_gray, template_image, cv2.TM_CCOEFF_NORMED)
threshold = 0.6
locations = np.where(res >= threshold)
for position_tuple in zip(*locations[::-1]):
    cv2.rectangle(img_rgb, position_tuple, (position_tuple[0] + width, position_tuple[1] + height), (0,255,0), 1)
However, it generates too many bounding boxes around the same location, as shown:
So, is there a workaround to solve this issue?
Here is one possible approach. The code is unlikely to be efficient; I think k-means clustering from some package may work better (see the sketch after the example below). The idea is to group together locations that are too close:
import numpy as np

def group_locations(locations, min_radius):
    x = locations[:, 0][:, None]
    dist_x = x - x.T
    y = locations[:, 1][:, None]
    dist_y = y - y.T
    dist = np.sqrt(dist_x**2 + dist_y**2)
    np.fill_diagonal(dist, min_radius + 1)
    too_close = np.nonzero(dist <= min_radius)
    groups = []
    points = np.arange(len(locations))
    i = 0
    j = 0
    while i < len(points):
        groups.append([points[i]])
        for j in range(len(too_close[0])):
            if too_close[0][j] == points[i]:
                groups[i].append(too_close[1][j])
                points = np.delete(points, np.nonzero(points == too_close[1][j]))
        i += 1
    new_locations = []
    for group in groups:
        new_locations.append(np.mean(locations[group], axis=0))
    return np.array(new_locations)
So you take your locations and group them before plotting:
import random
import matplotlib.pyplot as plt

locations = []
size = 600
for _ in range(50):
    locations.append((random.randint(0, size), random.randint(0, size)))
locations = np.array(locations)
min_radius = 50
new_locations = group_locations(locations, min_radius)
# I run it a second time, as sometimes locations form chains longer than the radius
new_locations = group_locations(new_locations, min_radius)
plt.scatter(locations[:, 0], locations[:, 1], c='blue', label='Original locations')
plt.scatter(new_locations[:, 0], new_locations[:, 1], c='red', marker='x', label='New grouped locations')
plt.legend()
plt.show()
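As mentioned above, a clustering routine from an existing package may do this more robustly; for instance (my sketch, assuming SciPy is available), hierarchical clustering gives a compact replacement for the grouping loop:
import numpy as np
from scipy.cluster.hierarchy import fclusterdata

def group_locations_scipy(locations, min_radius):
    # single-linkage clusters whose merge distance stays below min_radius,
    # then the centroid of each cluster
    labels = fclusterdata(locations.astype(float), t=min_radius, criterion='distance')
    return np.array([locations[labels == lbl].mean(axis=0) for lbl in np.unique(labels)])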
I actually tried it with the image provided:
img_rgb = cv2.imread('obsvu.jpg')
img_rgb = cv2.medianBlur(img_rgb, 7)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
template_image = cv2.imread('template.jpg',0)
template_image = cv2.medianBlur(template_image, 5)
width, height = template_image.shape[::-1]
res = cv2.matchTemplate(img_gray, template_image, cv2.TM_CCOEFF_NORMED)
threshold = 0.6
locations = np.where(res >= threshold)
new_locations = group_locations(np.array(locations).T, 50).T
for position_tuple in zip(*new_locations.astype(int)[::-1]):
    cv2.rectangle(img_rgb, position_tuple, (position_tuple[0] + width, position_tuple[1] + height), (0,255,0), 5)
Original locations: 723
New locations: 6 (yep, template selection not the best)

How to keep records of multiple previous frames in video processing

I am currently working on lane detection for autonomous vehicles using OpenCV and Python. I have used the Hough line transform to get lines in the road, and from those lines I calculate the mean slope and intercept values separately for the lines with negative and positive slopes. To reduce errors, I want to keep track of the calculated mean slopes and intercepts of the previous 3 frames, so that I can check how much the current frame's mean slope and intercept deviate and correct them accordingly. Is there a way to keep track of the 3 previous frames?
This is what I have done up to now; it works on the current frame. I want to keep the calculated mean values for up to 3 previous frames and access those values from the current frame.
import numpy as np
import cv2
import math

cap = cv2.VideoCapture('video3.mov')
while (cap.isOpened()):
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
    # HSL color mask
    lower_white = np.uint8([0, 140, 0])
    upper_white = np.uint8([255, 255, 255])
    mask = cv2.inRange(hsv, lower_white, upper_white)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    height = np.size(hsv, 0)
    width = np.size(hsv, 1)
    rows, cols = hsv.shape[:2]
    bottom_left = [cols * 0, rows * 0.9]
    top_left = [cols * 0.4, rows * 0.7]
    bottom_right = [cols * 1, rows * 0.9]
    top_right = [cols * 0.6, rows * 0.7]
    vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)
    maskzero = np.zeros_like(res)
    cv2.fillPoly(maskzero, vertices, (255,) * maskzero.shape[2])
    maskedimg = cv2.bitwise_and(res, maskzero)
    # smoothed = cv2.medianBlur(maskedimg,3)
    gaussian = cv2.GaussianBlur(maskedimg, (3, 3), 0)
    # apply canny on masked image
    cannymask = cv2.Canny(gaussian, 150, 250)
    # apply hough transform houghlinesP
    lines = cv2.HoughLinesP(cannymask, rho=1, theta=np.pi / 180, threshold=20, minLineLength=10, maxLineGap=300)
    left_slope = []
    right_slope = []
    left_intercept = []
    right_intercept = []
    total_right_length = []
    total_left_length = []
    if lines is not None:
        for line in lines:
            for x1, y1, x2, y2 in line:
                if (x2 - x1) != 0:
                    slope = (y2 - y1) / (x2 - x1)
                    intercept = y1 - slope * x1
                    length = np.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2)
                    angle = math.atan2(y2 - y1, x2 - x1)
                    degree = angle * 180 / np.pi
                    if x2 == x1 or y2 == y1:
                        continue
                    elif slope > 0:
                        if int(degree) in range(27, 41):
                            right_slope.append(slope)
                            right_intercept.append(intercept)
                            total_right_length.append(length)
                            angle = math.atan2(y2 - y1, x2 - x1)
                            degree = angle * 180 / np.pi
                            print("positive", degree)
                    elif slope < 0:
                        if int(degree) in range(-62, -31):
                            left_slope.append(slope)
                            left_intercept.append(intercept)
                            total_left_length.append(length)
                            angle = math.atan2(y2 - y1, x2 - x1)
                            degree = angle * 180 / np.pi
                            print("negative", degree)
                            degreeint = int(degree)
                    #cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
    sum_right_length = np.sum(total_right_length) if len(total_right_length) > 0 else None
    right_mean_slope = np.mean(right_slope) if len(right_slope) > 0 else None
    right_mean_intercept = np.mean(right_intercept) if len(right_intercept) > 0 else None
    sum_left_length = np.sum(total_left_length) if len(total_left_length) > 0 else None
    left_mean_slope = np.mean(left_slope) if len(left_slope) > 0 else None
    left_mean_intercept = np.mean(left_intercept) if len(left_intercept) > 0 else None
    right_x1 = 0
    right_y1 = 0
    right_x2 = 0
    right_y2 = 0
    left_x1 = 0
    left_x2 = 0
    left_y2 = 0
    left_y1 = 0
    y1 = frame.shape[0]  # bottom of the image
    y2 = y1 * 0.7
    if right_mean_intercept is not None and right_mean_slope is not None:
        right_x1 = int((y1 - right_mean_intercept) / right_mean_slope)
        right_x2 = int((y2 - right_mean_intercept) / right_mean_slope)
        right_y1 = int(y1)
        right_y2 = int(y2)
    if left_mean_intercept is not None and left_mean_slope is not None:
        left_x1 = int((y1 - left_mean_intercept) / left_mean_slope)
        left_x2 = int((y2 - left_mean_intercept) / left_mean_slope)
        left_y1 = int(y1)
        left_y2 = int(y2)
    cv2.line(frame, (right_x1, right_y1), (right_x2, right_y2), (0, 0, 255), 10)
    cv2.line(frame, (left_x1, left_y1), (left_x2, left_y2), (255, 0, 0), 10)
    cv2.imshow("New_lines", frame)
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
I want to keep the right and left mean slope and intercept values of the 3 previous frames and access them from the current frame.
import numpy as np
import cv2
import math

cap = cv2.VideoCapture('video3.mov')
previous = []  # We will keep track of the previous 3 frames in this list
while (cap.isOpened()):
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
    if len(previous) == 3:
        # do your thing, like calculating the mean
        for x in range(3):
            print(previous[x])  # This is how you can access the previous frames
        # ================================================================
        # After you are done, add the two lines of code below.
        # They are necessary to keep track of the most recent 3 frames.
        # ================================================================
        previous = previous[1:]  # Removing the first frame, so now len(previous) is 2
        previous.append(frame)   # Adding the current frame, so len(previous) is 3 again
    else:
        # The else branch covers the very beginning, when previous starts
        # out as an empty list and holds fewer than 3 frames.
        previous.append(frame)
        continue  # If you want to do computation even when len(previous) < 3, comment out this continue
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
import numpy as np
import cv2
import math

cap = cv2.VideoCapture('video3.mov')
prevFrames = [None, None, None]  # avoid index mishaps for [-1]..[-3];
                                 # you need to test for None when accessing
while (cap.isOpened()):
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HLS)
    # [...]
    # access prevFrames[-1] for the last frame,
    # [-2] for the one before that,
    # [-3] for the one 3 before now
    # check value for None before using
    sum_right_length = np.sum(total_right_length) if len(total_right_length) > 0 else None
    right_mean_slope = np.mean(right_slope) if len(right_slope) > 0 else None
    right_mean_intercept = np.mean(right_intercept) if len(right_intercept) > 0 else None
    sum_left_length = np.sum(total_left_length) if len(total_left_length) > 0 else None
    left_mean_slope = np.mean(left_slope) if len(left_slope) > 0 else None
    left_mean_intercept = np.mean(left_intercept) if len(left_intercept) > 0 else None
    # [...]
    # add data from the current frame at the end
    prevFrames.append([right_mean_slope, left_mean_slope, right_mean_intercept, left_mean_intercept])
    if len(prevFrames) > 3:
        prevFrames.pop(0)  # remove and discard the oldest element
    cv2.line(frame, (right_x1, right_y1), (right_x2, right_y2), (0, 0, 255), 10)
    cv2.line(frame, (left_x1, left_y1), (left_x2, left_y2), (255, 0, 0), 10)
    cv2.imshow("New_lines", frame)

Overlay a smaller image on a larger image in Python OpenCV

Hi, I am creating a program that replaces a face in an image with someone else's face. However, I am stuck on trying to insert the new face into the original, larger image. I have researched ROI and addWeighted (it needs the images to be the same size) but I haven't found a way to do this in Python. Any advice is great. I am new to OpenCV.
I am using the following test images:
smaller_image:
larger_image:
Here is my code so far... a mixture of other samples:
import cv2
import cv2.cv as cv
import sys
import numpy

def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3, minSize=(10, 10), flags = cv.CV_HAAR_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:,2:] += rects[:,:2]
    return rects

def draw_rects(img, rects, color):
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

if __name__ == '__main__':
    if len(sys.argv) != 2: ## Check for error in usage syntax
        print "Usage : python faces.py <image_file>"
    else:
        img = cv2.imread(sys.argv[1],cv2.CV_LOAD_IMAGE_COLOR) ## Read image file
        if (img == None):
            print "Could not open or find the image"
        else:
            cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
            gray = cv2.cvtColor(img, cv.CV_BGR2GRAY)
            gray = cv2.equalizeHist(gray)
            rects = detect(gray, cascade)
            ## Extract face coordinates
            x1 = rects[0][3]
            y1 = rects[0][0]
            x2 = rects[0][4]
            y2 = rects[0][5]
            y = y2 - y1
            x = x2 - x1
            ## Extract face ROI
            faceROI = gray[x1:x2, y1:y2]
            ## Show face ROI
            cv2.imshow('Display face ROI', faceROI)
            small = cv2.imread("average_face.png",cv2.CV_LOAD_IMAGE_COLOR)
            print "here"
            small = cv2.resize(small, (x, y))
            cv2.namedWindow('Display image') ## create window for display
            cv2.imshow('Display image', small) ## Show image in the window
            print "size of image: ", img.shape ## print size of image
            cv2.waitKey(1000)
A simple way to achieve what you want:
import cv2
s_img = cv2.imread("smaller_image.png")
l_img = cv2.imread("larger_image.jpg")
x_offset=y_offset=50
l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]] = s_img
Update
I suppose you want to take care of the alpha channel too. Here is a quick and dirty way of doing so:
s_img = cv2.imread("smaller_image.png", -1)
y1, y2 = y_offset, y_offset + s_img.shape[0]
x1, x2 = x_offset, x_offset + s_img.shape[1]
alpha_s = s_img[:, :, 3] / 255.0
alpha_l = 1.0 - alpha_s
for c in range(0, 3):
    l_img[y1:y2, x1:x2, c] = (alpha_s * s_img[:, :, c] +
                              alpha_l * l_img[y1:y2, x1:x2, c])
Using #fireant's idea, I wrote up a function to handle overlays. This works well for any position argument (including negative positions).
import numpy as np

def overlay_image_alpha(img, img_overlay, x, y, alpha_mask):
    """Overlay `img_overlay` onto `img` at (x, y) and blend using `alpha_mask`.

    `alpha_mask` must have same HxW as `img_overlay` and values in range [0, 1].
    """
    # Image ranges
    y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])
    x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])
    # Overlay ranges
    y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)
    x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)
    # Exit if nothing to do
    if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:
        return
    # Blend overlay within the determined ranges
    img_crop = img[y1:y2, x1:x2]
    img_overlay_crop = img_overlay[y1o:y2o, x1o:x2o]
    alpha = alpha_mask[y1o:y2o, x1o:x2o, np.newaxis]
    alpha_inv = 1.0 - alpha
    img_crop[:] = alpha * img_overlay_crop + alpha_inv * img_crop
Example usage:
import numpy as np
from PIL import Image
# Prepare inputs
x, y = 50, 0
img = np.array(Image.open("img_large.jpg"))
img_overlay_rgba = np.array(Image.open("img_small.png"))
# Perform blending
alpha_mask = img_overlay_rgba[:, :, 3] / 255.0
img_result = img[:, :, :3].copy()
img_overlay = img_overlay_rgba[:, :, :3]
overlay_image_alpha(img_result, img_overlay, x, y, alpha_mask)
# Save result
Image.fromarray(img_result).save("img_result.jpg")
Result:
If you encounter errors or unusual outputs, please ensure:
img should not contain an alpha channel (e.g. if it is RGBA, convert to RGB first).
img_overlay has the same number of channels as img.
Based on fireant's excellent answer above, here is the alpha blending, but a bit more human-legible. You may need to swap 1.0-alpha and alpha depending on which direction you're merging (mine is swapped from fireant's answer).
Here, the o* variables index into s_img and the b* variables index into l_img:
for c in range(0, 3):
    alpha = s_img[oy:oy+height, ox:ox+width, 3] / 255.0
    color = s_img[oy:oy+height, ox:ox+width, c] * (1.0 - alpha)
    beta = l_img[by:by+height, bx:bx+width, c] * alpha
    l_img[by:by+height, bx:bx+width, c] = color + beta
Here it is:
def put4ChannelImageOn4ChannelImage(back, fore, x, y):
    rows, cols, channels = fore.shape
    trans_indices = fore[..., 3] != 0  # Where not transparent
    overlay_copy = back[y:y+rows, x:x+cols]
    overlay_copy[trans_indices] = fore[trans_indices]
    back[y:y+rows, x:x+cols] = overlay_copy

# test
background = np.zeros((1000, 1000, 4), np.uint8)
background[:] = (127, 127, 127, 1)
overlay = cv2.imread('imagee.png', cv2.IMREAD_UNCHANGED)
put4ChannelImageOn4ChannelImage(background, overlay, 5, 5)
A simple function that blits an image front onto an image back and returns the result. It works with both 3 and 4-channel images and deals with the alpha channel. Overlaps are handled as well.
The output image has the same size as back, but always 4 channels.
The output alpha channel is given by (u+v)/(1+uv) where u,v are the alpha channels of the front and back image and -1 <= u,v <= 1. Where there is no overlap with front, the alpha value from back is taken.
import cv2

def merge_image(back, front, x, y):
    # convert to rgba
    if back.shape[2] == 3:
        back = cv2.cvtColor(back, cv2.COLOR_BGR2BGRA)
    if front.shape[2] == 3:
        front = cv2.cvtColor(front, cv2.COLOR_BGR2BGRA)
    # crop the overlay from both images
    bh, bw = back.shape[:2]
    fh, fw = front.shape[:2]
    x1, x2 = max(x, 0), min(x+fw, bw)
    y1, y2 = max(y, 0), min(y+fh, bh)
    front_cropped = front[y1-y:y2-y, x1-x:x2-x]
    back_cropped = back[y1:y2, x1:x2]
    alpha_front = front_cropped[:, :, 3:4] / 255
    alpha_back = back_cropped[:, :, 3:4] / 255
    # replace an area in result with overlay
    result = back.copy()
    print(f'af: {alpha_front.shape}\nab: {alpha_back.shape}\nfront_cropped: {front_cropped.shape}\nback_cropped: {back_cropped.shape}')
    result[y1:y2, x1:x2, :3] = alpha_front * front_cropped[:, :, :3] + (1-alpha_front) * back_cropped[:, :, :3]
    result[y1:y2, x1:x2, 3:4] = (alpha_front + alpha_back) / (1 + alpha_front*alpha_back) * 255
    return result
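A possible invocation (my example with synthetic images; any BGR/BGRA pair should work):
import numpy as np
back = np.zeros((480, 640, 3), np.uint8)  # plain BGR background
front = np.zeros((100, 100, 4), np.uint8)
front[:, :, 2] = 255                      # a red square,
front[:, :, 3] = 255                      # fully opaque
out = merge_image(back, front, 50, 60)    # top-left corner at x=50, y=60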
To simply blend s_img into the matching region of l_img with a uniform 50% alpha, I use cv2.addWeighted before the line
l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]] = s_img
as follows:
s_img = cv2.addWeighted(l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]], 0.5, s_img, 0.5, 0)
If, when attempting to write to the destination image using any of the answers above, you get the following error:
ValueError: assignment destination is read-only
a quick potential fix is to set the WRITEABLE flag to true:
img.setflags(write=1)
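Alternatively (my note), taking a writable copy of the array sidesteps the read-only buffer altogether:
img = img.copy()  # ndarray.copy() always returns a writable array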
A simple 4-on-4 pasting function that works:
def paste(background, foreground, pos=(0,0)):
    # get position and crop pasting area if needed
    x = pos[0]
    y = pos[1]
    bgWidth = background.shape[0]
    bgHeight = background.shape[1]
    frWidth = foreground.shape[0]
    frHeight = foreground.shape[1]
    width = bgWidth - x
    height = bgHeight - y
    if frWidth < width:
        width = frWidth
    if frHeight < height:
        height = frHeight
    # normalize alpha channels from 0-255 to 0-1
    alpha_background = background[x:x+width, y:y+height, 3] / 255.0
    alpha_foreground = foreground[:width, :height, 3] / 255.0
    # set adjusted colors
    for color in range(0, 3):
        fr = alpha_foreground * foreground[:width, :height, color]
        bg = alpha_background * background[x:x+width, y:y+height, color] * (1 - alpha_foreground)
        background[x:x+width, y:y+height, color] = fr + bg
    # set adjusted alpha and denormalize back to 0-255
    background[x:x+width, y:y+height, 3] = (1 - (1 - alpha_foreground) * (1 - alpha_background)) * 255
    return background
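A possible call (my example; note that in this function pos[0] indexes axis 0, i.e. rows):
import numpy as np
bg = np.zeros((400, 400, 4), np.uint8)
bg[:] = (64, 64, 64, 255)          # opaque grey background
fg = np.zeros((120, 120, 4), np.uint8)
fg[..., 1] = 255                   # green foreground,
fg[..., 3] = 128                   # at roughly 50% alpha
out = paste(bg, fg, pos=(40, 80))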
I reworked #fireant's concept to allow for optional alpha masks and allow any x or y, including values outside of the bounds of the image. It will crop to the bounds.
def overlay_image_alpha(img, img_overlay, x, y, alpha_mask=None):
    """Overlay `img_overlay` onto `img` at (x, y) and blend using optional `alpha_mask`.

    `alpha_mask` must have same HxW as `img_overlay` and values in range [0, 1].
    """
    if y < 0 or y + img_overlay.shape[0] > img.shape[0] or x < 0 or x + img_overlay.shape[1] > img.shape[1]:
        y_origin = 0 if y > 0 else -y
        y_end = img_overlay.shape[0] if y < 0 else min(img.shape[0] - y, img_overlay.shape[0])
        x_origin = 0 if x > 0 else -x
        x_end = img_overlay.shape[1] if x < 0 else min(img.shape[1] - x, img_overlay.shape[1])
        img_overlay_crop = img_overlay[y_origin:y_end, x_origin:x_end]
        alpha = alpha_mask[y_origin:y_end, x_origin:x_end] if alpha_mask is not None else None
    else:
        img_overlay_crop = img_overlay
        alpha = alpha_mask
    y1 = max(y, 0)
    y2 = min(img.shape[0], y1 + img_overlay_crop.shape[0])
    x1 = max(x, 0)
    x2 = min(img.shape[1], x1 + img_overlay_crop.shape[1])
    img_crop = img[y1:y2, x1:x2]
    img_crop[:] = alpha * img_overlay_crop + (1.0 - alpha) * img_crop if alpha is not None else img_overlay_crop
