Converting a dataset from YOLO v3 PyTorch format to the required format - Python

I am trying to implement the conversion discussed in the blog post https://medium.com/@alexppppp/how-to-annotate-keypoints-using-roboflow-9bc2aa8915cd.
In my dataset I need 4 keypoints and 1 box. How should the code be edited for that?
My code and the error are shown below.
import cv2

keypoint_names = ['Head', 'Tail']
rectangles2keypoints = {1: 0, 2: 1}

def converter(file_labels, file_image, keypoint_names):
    img = cv2.imread(file_image)
    img_w, img_h = img.shape[1], img.shape[0]

    with open(file_labels) as f:
        lines_txt = f.readlines()
        lines = []
        for line in lines_txt:
            lines.append([int(line.split()[0])] + [round(float(el), 5) for el in line.split()[1:]])

    bboxes = []
    keypoints = []

    # In this loop we convert normalized coordinates to absolute coordinates
    for line in lines:
        # Number 0 is a class of rectangles related to bounding boxes.
        if line[0] == 0:
            x_c, y_c, w, h = round(line[1] * img_w), round(line[2] * img_h), round(line[3] * img_w), round(line[4] * img_h)
            bboxes.append([round(x_c - w/2), round(y_c - h/2), round(x_c + w/2), round(y_c + h/2)])

        # Other numbers are the classes of rectangles related to keypoints.
        # After conversion, numbers of keypoint classes should start with 0, so we apply the rectangles2keypoints dictionary to achieve that.
        # In our case:
        # 1 is the rectangle for the head keypoint, which is 0, so we convert 1 to 0;
        # 2 is the rectangle for the tail keypoint, which is 1, so we convert 2 to 1.
        if line[0] != 0:
            kp_id, x_c, y_c = rectangles2keypoints[line[0]], round(line[1] * img_w), round(line[2] * img_h)
            keypoints.append([kp_id, x_c, y_c])

    # In this loop we iterate over each keypoint and look for the bounding box it falls into.
    # Thus, we match keypoints to their corresponding bounding boxes.
    keypoints_sorted = [[[] for _ in keypoint_names] for _ in bboxes]

    for kp in keypoints:
        kp_id, kp_x, kp_y = kp[0], kp[1], kp[2]
        for bbox_idx, bbox in enumerate(bboxes):
            x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
            if x1 < kp_x < x2 and y1 < kp_y < y2:
                keypoints_sorted[bbox_idx][kp_id] = [kp_x, kp_y, 1]  # All keypoints are visible

    return bboxes, keypoints_sorted
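Presumably only the two lookup structures at the top need to change; a minimal sketch of that adaptation, assuming the four keypoint rectangles are labeled with classes 1-4 in the annotation files (the names below are placeholders):

# Hypothetical adaptation: 4 keypoints, 1 box class (class 0).
# The keypoint names and the 1-4 class ids are assumptions about the labels.
keypoint_names = ['kp1', 'kp2', 'kp3', 'kp4']
rectangles2keypoints = {1: 0, 2: 1, 3: 2, 4: 3}

# converter() itself needs no other changes: keypoints_sorted is sized
# from len(keypoint_names), so it now holds 4 slots per box.
bboxes, keypoints_sorted = converter('labels.txt', 'image.jpg', keypoint_names)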


Extract artwork from table game card image with OpenCV

I wrote a small script in Python where I'm trying to extract or crop the part of the playing card that represents the artwork only, removing all the rest. I've been trying various methods of thresholding but couldn't get there. Also note that I can't simply record the position of the artwork manually, because it's not always in the same position or size, but it is always a rectangular shape where everything else is just text and borders.
from matplotlib import pyplot as plt
import numpy as np
import cv2

img = cv2.imread(filename)  # filename points at the card image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY)
binary = cv2.bitwise_not(binary)

kernel = np.ones((15, 15), np.uint8)
closing = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel)  # note: MORPH_OPEN despite the variable name
plt.imshow(closing), plt.show()
The current output is the closest thing I could get. I may be on the right track and could try some further wrangling to draw a rectangle around the white parts, but I don't think it's a sustainable method:
As a last note, see the cards below, not all frames are exactly the same sizes or positions, but there's always a piece of artwork with only text and borders around it. It doesn't have to be super precisely cut, but clearly the art is a "region" of the card, surrounded by other regions containing some text. My goal is to try to capture the region of the artwork as well as I can.
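For what it's worth, the "further wrangling" I have in mind would look something like the sketch below (reusing closing and img from the script above); it boxes the largest white region, which is exactly the part I suspect won't generalize:

# Sketch: draw a bounding rectangle around the largest white blob in `closing`.
# (OpenCV 4 signature; OpenCV 3 returns an extra first value from findContours.)
contours, _ = cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
largest = max(contours, key=cv2.contourArea)
x, y, w, h = cv2.boundingRect(largest)
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)), plt.show()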
I used a Hough line transform to detect the linear parts of the image.
The crossings of all lines were used to construct all possible rectangles that do not contain other crossing points.
Since the part of the card you are looking for is always the biggest of those rectangles (at least in the samples you provided), I simply chose the biggest of those rectangles as the winner.
The script works without user interaction.
import cv2
import numpy as np
from collections import defaultdict

def segment_by_angle_kmeans(lines, k=2, **kwargs):
    # Groups lines based on angle with k-means.
    # Uses k-means on the coordinates of the angle on the unit circle
    # to segment `k` angles inside `lines`.

    # Define criteria = (type, max_iter, epsilon)
    default_criteria_type = cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER
    criteria = kwargs.get('criteria', (default_criteria_type, 10, 1.0))
    flags = kwargs.get('flags', cv2.KMEANS_RANDOM_CENTERS)
    attempts = kwargs.get('attempts', 10)

    # returns angles in [0, pi] in radians
    angles = np.array([line[0][1] for line in lines])
    # multiply the angles by two and find coordinates of that angle
    pts = np.array([[np.cos(2*angle), np.sin(2*angle)]
                    for angle in angles], dtype=np.float32)

    # run kmeans on the coords
    labels, centers = cv2.kmeans(pts, k, None, criteria, attempts, flags)[1:]
    labels = labels.reshape(-1)  # transpose to row vec

    # segment lines based on their kmeans label
    segmented = defaultdict(list)
    for i, line in enumerate(lines):
        segmented[labels[i]].append(line)
    segmented = list(segmented.values())
    return segmented

def intersection(line1, line2):
    # Finds the intersection of two lines given in Hesse normal form.
    # Returns closest integer pixel locations.
    # See https://stackoverflow.com/a/383527/5087436
    rho1, theta1 = line1[0]
    rho2, theta2 = line2[0]
    A = np.array([
        [np.cos(theta1), np.sin(theta1)],
        [np.cos(theta2), np.sin(theta2)]
    ])
    b = np.array([[rho1], [rho2]])
    x0, y0 = np.linalg.solve(A, b)
    x0, y0 = int(np.round(x0)), int(np.round(y0))
    return [[x0, y0]]

def segmented_intersections(lines):
    # Finds the intersections between groups of lines.
    intersections = []
    for i, group in enumerate(lines[:-1]):
        for next_group in lines[i+1:]:
            for line1 in group:
                for line2 in next_group:
                    intersections.append(intersection(line1, line2))
    return intersections

def rect_from_crossings(crossings):
    # find all rectangles without other points inside
    rectangles = []

    # Search all possible rectangles
    for i in range(len(crossings)):
        x1 = int(crossings[i][0][0])
        y1 = int(crossings[i][0][1])

        for j in range(len(crossings)):
            x2 = int(crossings[j][0][0])
            y2 = int(crossings[j][0][1])

            # Search all points
            flag = 1
            for k in range(len(crossings)):
                x3 = int(crossings[k][0][0])
                y3 = int(crossings[k][0][1])

                # Don't count double (reverse rectangles)
                if (x1 > x2 or y1 > y2):
                    flag = 0
                # Don't count rectangles with points inside
                elif ((((x3 >= x1) and (x2 >= x3)) and (y3 > y1) and (y2 > y3) or ((x3 > x1) and (x2 > x3)) and (y3 >= y1) and (y2 >= y3))):
                    if (i != k and j != k):
                        flag = 0

            if flag:
                rectangles.append([[x1, y1], [x2, y2]])
    return rectangles

if __name__ == '__main__':
    # img = cv2.imread('TAJFp.jpg')
    # img = cv2.imread('Bj2uu.jpg')
    img = cv2.imread('yi8db.png')

    width = int(img.shape[1])
    height = int(img.shape[0])

    scale = 380/width
    dim = (int(width*scale), int(height*scale))

    # resize image
    img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)

    img2 = img.copy()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), cv2.BORDER_DEFAULT)

    # Parameters of Canny and Hough may have to be tweaked to work for as many cards as possible
    edges = cv2.Canny(gray, 10, 45, apertureSize=7)
    lines = cv2.HoughLines(edges, 1, np.pi/90, 160)

    segmented = segment_by_angle_kmeans(lines)
    crossings = segmented_intersections(segmented)
    rectangles = rect_from_crossings(crossings)

    # Find biggest remaining rectangle
    size = 0
    for i in range(len(rectangles)):
        x1 = rectangles[i][0][0]
        x2 = rectangles[i][1][0]
        y1 = rectangles[i][0][1]
        y2 = rectangles[i][1][1]

        if (size < (abs(x1-x2) * abs(y1-y2))):
            size = abs(x1-x2) * abs(y1-y2)
            x1_rect = x1
            x2_rect = x2
            y1_rect = y1
            y2_rect = y2

    cv2.rectangle(img2, (x1_rect, y1_rect), (x2_rect, y2_rect), (0, 0, 255), 2)
    roi = img[y1_rect:y2_rect, x1_rect:x2_rect]

    cv2.imshow("Output", roi)
    cv2.imwrite("Output.png", roi)
    cv2.waitKey()
These are the results with the samples you provided:
The code for finding line crossings can be found here: find intersection point of two lines drawn using houghlines opencv
You can read more about Hough Lines here.
We know that cards have straight boundaries along the x and y axes. We can use this to extract parts of the image. The following code detects horizontal and vertical lines in the image.
import cv2
import numpy as np

def mouse_callback(event, x, y, flags, params):
    global num_click
    if num_click < 2 and event == cv2.EVENT_LBUTTONDOWN:
        num_click = num_click + 1
        print(num_click)
        global upper_bound, lower_bound, left_bound, right_bound
        upper_bound.append(max(i for i in hor if i < y) + 1)
        lower_bound.append(min(i for i in hor if i > y) - 1)
        left_bound.append(max(i for i in ver if i < x) + 1)
        right_bound.append(min(i for i in ver if i > x) - 1)

filename = 'image.png'
thr = 100  # edge detection threshold
lined = 50  # number of consecutive True pixels required on an axis to be counted as a line
num_click = 0  # select only twice
upper_bound, lower_bound, left_bound, right_bound = [], [], [], []
winname = 'img'
cv2.namedWindow(winname)
cv2.setMouseCallback(winname, mouse_callback)

img = cv2.imread(filename, 1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
bw = cv2.Canny(gray, thr, 3*thr)

height, width, _ = img.shape

# find horizontal lines
hor = []
for i in range(0, height-1):
    count = 0
    for j in range(0, width-1):
        if bw[i, j]:
            count = count + 1
        else:
            count = 0
        if count >= lined:
            hor.append(i)
            break

# find vertical lines
ver = []
for j in range(0, width-1):
    count = 0
    for i in range(0, height-1):
        if bw[i, j]:
            count = count + 1
        else:
            count = 0
        if count >= lined:
            ver.append(j)
            break

# draw lines
disp_img = np.copy(img)
for i in hor:
    cv2.line(disp_img, (0, i), (width-1, i), (0, 0, 255), 1)
for i in ver:
    cv2.line(disp_img, (i, 0), (i, height-1), (0, 0, 255), 1)

while num_click < 2:
    cv2.imshow(winname, disp_img)
    cv2.waitKey(10)

disp_img = img[min(upper_bound):max(lower_bound), min(left_bound):max(right_bound)]
cv2.imshow(winname, disp_img)
cv2.waitKey()  # Press any key to exit
cv2.destroyAllWindows()
You just need to click two areas to include. A sample click area and the corresponding result are as follows:
Results from other images:
I don't think it is possible to automatically crop the artwork ROI using traditional image processing techniques due to the dynamic nature of the colors, dimensions, locations, and textures for each card. You would have to look into machine/deep learning and train your own classifier if you want to do it automatically. Instead, here's a manual approach to select and crop a static ROI from an image.
The idea is to use cv2.setMouseCallback() and event handlers to detect if the mouse has been clicked or released. For this implementation, you can extract the artwork ROI by holding down the left mouse button and dragging to select the desired ROI. Once you have selected the desired ROI, press c to crop and save the ROI. You can reset the ROI using the right mouse button.
Saved artwork ROIs
Code
import cv2

class ExtractArtworkROI(object):
    def __init__(self):
        # Load image
        self.original_image = cv2.imread('1.png')
        self.clone = self.original_image.copy()
        cv2.namedWindow('image')
        cv2.setMouseCallback('image', self.extractROI)
        self.selected_ROI = False

        # ROI bounding box reference points
        self.image_coordinates = []

    def extractROI(self, event, x, y, flags, parameters):
        # Record starting (x,y) coordinates on left mouse button click
        if event == cv2.EVENT_LBUTTONDOWN:
            self.image_coordinates = [(x, y)]

        # Record ending (x,y) coordinates on left mouse button release
        elif event == cv2.EVENT_LBUTTONUP:
            # Remove old bounding box
            if self.selected_ROI:
                self.clone = self.original_image.copy()

            # Draw rectangle
            self.selected_ROI = True
            self.image_coordinates.append((x, y))
            cv2.rectangle(self.clone, self.image_coordinates[0], self.image_coordinates[1], (36, 255, 12), 2)

            print('top left: {}, bottom right: {}'.format(self.image_coordinates[0], self.image_coordinates[1]))
            print('x,y,w,h : ({}, {}, {}, {})'.format(self.image_coordinates[0][0], self.image_coordinates[0][1], self.image_coordinates[1][0] - self.image_coordinates[0][0], self.image_coordinates[1][1] - self.image_coordinates[0][1]))

        # Clear drawing boxes on right mouse button click
        elif event == cv2.EVENT_RBUTTONDOWN:
            self.selected_ROI = False
            self.clone = self.original_image.copy()

    def show_image(self):
        return self.clone

    def crop_ROI(self):
        if self.selected_ROI:
            x1 = self.image_coordinates[0][0]
            y1 = self.image_coordinates[0][1]
            x2 = self.image_coordinates[1][0]
            y2 = self.image_coordinates[1][1]

            # Extract ROI
            self.cropped_image = self.original_image.copy()[y1:y2, x1:x2]

            # Display and save image
            cv2.imshow('Cropped Image', self.cropped_image)
            cv2.imwrite('ROI.png', self.cropped_image)
        else:
            print('Select ROI before cropping!')

if __name__ == '__main__':
    extractArtworkROI = ExtractArtworkROI()
    while True:
        cv2.imshow('image', extractArtworkROI.show_image())
        key = cv2.waitKey(1)

        # Close program with keyboard 'q'
        if key == ord('q'):
            cv2.destroyAllWindows()
            exit(1)

        # Crop ROI
        if key == ord('c'):
            extractArtworkROI.crop_ROI()

OpenCV homography does not produce the required transformation

I am trying to transform an image along the edge of the object (here the object is the book). Using Canny edge detection, I detect the edges, and from the score matrix, based on pixel value, I choose 4 random coordinates lying on the edge for the transformation. But the transformation is not as I thought it would be. What is the problem, and where am I going wrong?
First I sliced out a portion of the image. Then I applied Canny edge detection and randomly selected 4 edge coordinate points based on my own condition, as follows.
My original image is:
For the experiment I sliced it out according to my need, as:
The size of this image is (61, 160).
Now I need to transform the above image to make the edge of the book parallel to the horizontal axis.
import cv2
from matplotlib import pyplot as plt

img = cv2.imread('download1.jpg', 0)
edges = cv2.Canny(img, 100, 200)
print(img.shape)
plt.show()
plt.imshow(img, cmap='gray')

l = []
y_list = []
k = 1
for i in range(0, img.shape[0]):
    for j in range(0, img.shape[1]):
        if (edges[i][j] == 255) and k <= 4 and i > 31 and j not in y_list:
            l.append([j, i])
            y_list.append(j)
            k += 1
            break
The edge detection image is obtained as:
The contents of the l list are:
[[49 32]
[44 33]
[40 34]
[36 35]]
Then I set the destination points, given by the list lt, as:
[[49 61]
[44 60]
[40 61]
[36 60]]
Then I found the homography matrix and used it to warp the perspective:
h, status = cv2.findHomography(l, lt)
im_out = cv2.warpPerspective(img, h, (img.shape[1], img.shape[0]))
But it does not produce the required result! The resulting output image is:
I faced a similar issue, and this is how I solved it (quite similar to your method actually), except that I used a rotation matrix instead of a homography:
- read the image
- run an edge detector
- use a Hough line transform to get all the lines (with an inclination inside a specific interval)
lines = cv.HoughLinesP(img, 1, np.pi/180, 100, minLineLength=100, maxLineGap=10)
- get the lines' average inclination; in my case I had a lot of parallel lines to use as references, and in this way I was able to get a better result
tot_angle = 0
cnt = 0
for line in lines:
    x1, y1, x2, y2 = line[0]
    if (x2-x1) != 0:
        angle = math.atan((float(y2-y1))/float((x2-x1))) * 180 / math.pi
    else:
        angle = 90

    # you can skip this test if you have no info about the lines you're looking for
    # in this case offset_angle is 0
    if min_angle_threshold <= angle <= max_angle_threshold:
        tot_angle = tot_angle + angle
        cnt = cnt + 1

average_angle = (tot_angle / cnt) - offset_angle
- apply the rotation, then the counter-rotation
center = ...  # your rotation center - probably the center of the image
rotation_matrix = cv.getRotationMatrix2D(center, average_angle, 1.0)
height, width = img.shape
rotated_image = cv.warpAffine(img, rotation_matrix, (width, height))

# do whatever you want, then rotate the image back
counter_rotation_matrix = cv.getRotationMatrix2D(center, -average_angle, 1.0)
original_image = cv.warpAffine(rotated_image, counter_rotation_matrix, (width, height))
Edit: see the full example below:
import math
import numpy as np
import cv2 as cv

img = cv.imread('C:\\temp\\test_3.jpg', 0)
edges = cv.Canny(img, 100, 200)
lines = cv.HoughLinesP(edges[0:50, :], 1, np.pi/180, 50, minLineLength=10, maxLineGap=10)

tot_angle = 0
cnt = 0
for line in lines:
    x1, y1, x2, y2 = line[0]
    if (x2-x1) != 0:
        angle = math.atan((float(y2-y1))/float((x2-x1))) * 180 / math.pi
    else:
        angle = 90
    if -30 <= angle <= 30:
        tot_angle = tot_angle + angle
        cnt = cnt + 1
average_angle = (tot_angle / cnt)

h, w = img.shape[:2]
center = w/2, h/2
rotation_matrix = cv.getRotationMatrix2D(center, average_angle, 1.0)
height, width = img.shape
rotated_image = cv.warpAffine(img, rotation_matrix, (width, height))
cv.imshow("roto", rotated_image)

# do all your stuff here, add text and whatever
# ...
# ...

counter_rotation_matrix = cv.getRotationMatrix2D(center, -average_angle, 1.0)
original_image = cv.warpAffine(rotated_image, counter_rotation_matrix, (width, height))
cv.imshow("orig", original_image)
(rotated result image)
(counter_rotated result image)
EDIT:
In case you want to apply a homography (different from just a simple rotation, because it also applies a perspective transformation), below is the code to make it work:
#very basic example, similar to your code with fixed terms
l = np.array([(11,32),(43,215),(142,1),(205,174)])
lt = np.array([(43,32),(43,215),(205,32),(205,215)])
h, status = cv.findHomography(l,lt)
im_out = cv.warpPerspective(img, h, (img.shape[1],img.shape[0]))
To do it programmatically:
- for "l": just use HoughLinesP as well, find the 4 corners, then add them
- for "lt": find a "destination" for all the 4 points; for instance, use the bottom corners as a reference
lines = cv.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength=150, maxLineGap=5)

l = []
x_values = []  # these two lists must start empty
y_values = []
for line in lines:
    x1, y1, x2, y2 = line[0]
    if (x2-x1) != 0:
        angle = math.atan((float(y2-y1))/float((x2-x1))) * 180 / math.pi
    else:
        angle = 90

    # consider only vertical edges
    if 60 <= angle:
        l.append((x1, y1))
        l.append((x2, y2))
        x_values.append(max(x1, x2))
        if len(y_values) == 0:
            y_values.append(y1)
            y_values.append(y2)

l = np.array(l)
lt = np.array([(x_values[0], y_values[0]), (x_values[0], y_values[1]), (x_values[1], y_values[0]), (x_values[1], y_values[1])])
Then call findHomography as done above.
Hope it's clear enough!

Creating borders on a table in an image in Python

I have an image which has a table and some other data. I need to draw borders for the table to separate out each cell.
My image looks like this.
What I am trying:
1) dilating the image to create continuous spots, which looks like
2) finding contours and drawing
Issue: I am not able to draw correctly because it looks like my table cells are too close together, and while dilating they become one continuous spot.
I took this code from the Internet and was trying to modify it, but it did not work out well for this image.
Code:
import os
import cv2
import imutils

# This only works if there's only one table on a page
# Important parameters:
#  - morph_size
#  - min_text_height_limit
#  - max_text_height_limit
#  - cell_threshold
#  - min_columns

def pre_process_image(img, save_in_file, morph_size=(7, 7)):
    # get rid of the color
    pre = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu threshold
    pre = cv2.threshold(pre, 250, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # dilate the text to make it a solid spot
    cpy = pre.copy()
    struct = cv2.getStructuringElement(cv2.MORPH_RECT, morph_size)
    cpy = cv2.dilate(~cpy, struct, anchor=(-1, -1), iterations=1)
    # cpy = cv2.dilate(img, kernel, iterations=1)
    pre = ~cpy
    # pre = cpy
    if save_in_file is not None:
        cv2.imwrite(save_in_file, pre)
    return pre

def find_text_boxes(pre, min_text_height_limit=3, max_text_height_limit=30):
    # Looking for the text spot contours
    contours = cv2.findContours(pre, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # contours = contours[0] if imutils.is_cv2() else contours[1]
    contours = contours[0]
    # Getting the text bounding boxes based on the text size assumptions
    boxes = []
    for contour in contours:
        box = cv2.boundingRect(contour)
        h = box[3]
        if min_text_height_limit < h < max_text_height_limit:
            boxes.append(box)
    return boxes

def find_table_in_boxes(boxes, cell_threshold=10, min_columns=2):
    rows = {}
    cols = {}
    # Clustering the bounding boxes by their positions
    for box in boxes:
        (x, y, w, h) = box
        col_key = x // cell_threshold
        row_key = y // cell_threshold
        cols[col_key] = [box] if col_key not in cols else cols[col_key] + [box]
        rows[row_key] = [box] if row_key not in rows else rows[row_key] + [box]
    # Filtering out the clusters having less than 2 cols
    table_cells = list(filter(lambda r: len(r) >= min_columns, rows.values()))
    # Sorting the row cells by x coord
    table_cells = [list(sorted(tb)) for tb in table_cells]
    # Sorting rows by the y coord
    table_cells = list(sorted(table_cells, key=lambda r: r[0][1]))
    return table_cells

def build_lines(table_cells):
    if table_cells is None or len(table_cells) <= 0:
        return [], []

    max_last_col_width_row = max(table_cells, key=lambda b: b[-1][2])
    max_x = max_last_col_width_row[-1][0] + max_last_col_width_row[-1][2]

    max_last_row_height_box = max(table_cells[-1], key=lambda b: b[3])
    max_y = max_last_row_height_box[1] + max_last_row_height_box[3]

    hor_lines = []
    ver_lines = []

    for box in table_cells:
        x = box[0][0]
        y = box[0][1]
        hor_lines.append((x, y, max_x, y))

    for box in table_cells[0]:
        x = box[0]
        y = box[1]
        ver_lines.append((x, y, x, max_y))

    (x, y, w, h) = table_cells[0][-1]
    ver_lines.append((max_x, y, max_x, max_y))
    (x, y, w, h) = table_cells[0][0]
    hor_lines.append((x, max_y, max_x, max_y))

    return hor_lines, ver_lines

if __name__ == "__main__":
    in_file = os.path.join("data", "page1.jpg")
    pre_file = os.path.join("data", "pre.png")
    out_file = os.path.join("data", "out.png")

    img = cv2.imread(os.path.join(in_file))

    pre_processed = pre_process_image(img, pre_file)
    text_boxes = find_text_boxes(pre_processed)
    cells = find_table_in_boxes(text_boxes)
    hor_lines, ver_lines = build_lines(cells)

    # Visualize the result
    vis = img.copy()

    # for box in text_boxes:
    #     (x, y, w, h) = box
    #     cv2.rectangle(vis, (x, y), (x + w - 2, y + h - 2), (0, 255, 0), 1)

    for line in hor_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)

    for line in ver_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)

    cv2.imwrite(out_file, vis)
Very interesting application.
Raw dilating may not be the best way to do it.
I do recommend using an OCR routine, like below.
The output is something like this:
So as long as there are two rows which are close to each other (e.g. row1 - row2 < n pixels), it is a close line. Then find the center position between the bottom of the first row (row1 + height1) and the top of the second row (row2). A line drawn there should be pretty accurate.
In my sample, if |292 - 335| < 50, then draw a line at ((292 + 27) + 335) / 2,
which means it's between the asset line and the property line.
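As a rough sketch of that rule (the boxes here are hypothetical (top, height) pairs standing in for real OCR output):

# Hypothetical OCR text rows: (top, height) of each detected line of text.
rows = [(292, 27), (335, 25)]
n_pixels = 50

for (top1, h1), (top2, _) in zip(rows, rows[1:]):
    if top2 - (top1 + h1) < n_pixels:  # the two rows are close
        line_y = ((top1 + h1) + top2) // 2  # midway between row bottom and next row top
        print("draw horizontal line at y =", line_y)  # -> 327 for this sample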
For the OCR package, you can try tesseract if you insist on Python:
https://pypi.org/project/pytesseract/
See here for getting text coordinates in Python: Tesseract OCR Text Position
Tesseract.PageIteratorLevel myLevel = /*TODO*/;
using (var page = Engine.Process(img))
using (var iter = page.GetIterator())
{
    iter.Begin();
    do
    {
        if (iter.TryGetBoundingBox(myLevel, out var rect))
        {
            var curText = iter.GetText(myLevel);
            // Your code here, 'rect' should contain the location of the text, 'curText' contains the actual text itself
        }
    } while (iter.Next(myLevel));
}
rect contains the part you wanted: x, y, height and width.
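In Python, the equivalent word boxes can be pulled with pytesseract; a minimal sketch, assuming Tesseract is installed (the file name is a placeholder):

import cv2
import pytesseract
from pytesseract import Output

img = cv2.imread('page1.jpg')
data = pytesseract.image_to_data(img, output_type=Output.DICT)

# One entry per detected word: left/top/width/height plus the text itself.
for text, x, y, w, h in zip(data['text'], data['left'], data['top'],
                            data['width'], data['height']):
    if text.strip():
        print(text, (x, y, w, h))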
The demo I show here actually uses something similar to the Windows OCR sample:
https://github.com/microsoft/Windows-universal-samples/tree/master/Samples/OCR
Feel free to try any of the methods to get the table lines you want.

Missing arrays from a multidimensional array

So apologies for the length of the code here. Basically, I have used OpenCV to analyse an image of 7 shapes and read 4 features for each.
The problem is that the code is only giving me arrays for 5 shapes and I'm unsure why. I have left out the imports etc. at the start to shorten the code.
img = cv2.imread("C:\\Users\\telli\\Desktop\\Shapetest.jpg")
#print(img)
#Converting the image to Grayscale
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(grey,127,255,1)
im2,contours, h = cv2.findContours(thresh, 1, cv2.CHAIN_APPROX_SIMPLE)
contours.sort(key = len)
numberOfSides = []
corners = []
standardDeviationsPerimeter = []
sidesDividedByPerimeter = []
standardDeviationsAngles = []
largestAngles = []
angles = []
perimeters = []
featureVectors = []
#finding all the possible circles in the image (likely many false positives)
circles = cv2.HoughCircles(grey,cv2.HOUGH_GRADIENT,1,20,
param1=50,param2=30,minRadius=0,maxRadius=0)
#finding sides
count = 0;
#print("New perimeter")
for contour in contours:
#remove double recognitions due to thick lines
if count %2 == 0:
epsilon = 10
x, y, w, h = cv2.boundingRect(contour)
x-= epsilon
y-= epsilon
w+= 2 *epsilon
h+= 2 * epsilon
insideCircle = False
#Removing false positive circles by testing each circle against
#the bounding box of this contour
#if we find a circle that is COMPLETELY inside the bounding box
#we have found a circle
for possibleCircle in range (0, len(circles[0])):
centreX = circles[0][possibleCircle][0]
centreY = circles[0][possibleCircle][1]
radius = circles[0][possibleCircle][2]
ToRight = centreX - radius >= x
ToLeft = centreX + radius <= x + w
ToBottom = centreY - radius >= y
ToTop = centreY + radius <= y + h
insideCircle = insideCircle or ToRight and ToLeft and ToBottom and ToTop
#Finding the perimeter of the shapes
perim = cv2.arcLength(contour, True)
perimeters.append(perim)
#if we found that this contour is a circle then the number of sides is 1
if insideCircle:
numberOfSides.append(1)
#use the number of corners found in the contours to determine how many
#sides it has
else:
corner = cv2.approxPolyDP(contour, 0.01 * perim, True)
corners.append(corner)
numberOfSides.append(len(corner))
#print(numberOfSides)
count = count + 1
#finding angles in shape
for shape in range(0, len(corners)):
angles.append([])
sidesDividedByPerimeter.append([])
for corner in range(0, len(corners[shape])):
# 3 vertices we need to find the angle at vertice b
ax = corners[shape][corner % len(corners[shape])][0][0]
ay = corners[shape][corner % len(corners[shape])][0][1]
bx = corners[shape][(corner + 1) % len(corners[shape])][0][0]
by = corners[shape][(corner + 1) % len(corners[shape])][0][1]
cx = corners[shape][(corner + 2) % len(corners[shape])][0][0]
cy = corners[shape][(corner + 2) % len(corners[shape])][0][1]
#print ("A: ", ax, ", ", ay, "\tB: ", bx, ", ", by, "\tC: ", cx, ", ", cy)
dirBAx = ax - bx
dirBAy = ay - by
dirBCx = cx - bx
dirBCy = cy - by
#do dot product and find angle in degrees
dot = dirBAx * dirBCx + dirBAy * dirBCy
lengthBC = math.sqrt(dirBCx * dirBCx + dirBCy * dirBCy)
lengthBA = math.sqrt(dirBAx * dirBAx + dirBAy * dirBAy)
angle = math.acos(dot / (lengthBC * lengthBA))
angle = angle * 180 / math.pi
angles[shape].append(angle)
sidesDividedByPerimeter[shape].append(lengthBC / perimeters[shape])
#print(lengthBC / perimeters[shape])
#print(angle)
#finding max of all angles in each shape
for shape in range(0, len(angles)):
largestAngles.append(np.amax(angles[shape]))
#print(largestAngles)
#if len(approx) == 16:
# cv2.drawContours(img, [contours[0]], 0, (0,0,255), -1)
#Calculating the standard deviation of the sides divided by the perimeter
#print("Standard Devs")
for shape in range(0, len(sidesDividedByPerimeter)):
standarddevPerim = statistics.stdev(sidesDividedByPerimeter[shape])
#Caluclating the standard deviation of the angles of each shape
standarddevAngle = statistics.stdev(angles[shape])
standardDeviationsPerimeter.append(standarddevPerim)
standardDeviationsAngles.append(standarddevAngle)
for shape in range(0, len(sidesDividedByPerimeter)):
featureVectors.append([])
featureVectors[shape].append(numberOfSides[shape])
featureVectors[shape].append(standardDeviationsPerimeter[shape])
featureVectors[shape].append(standardDeviationsAngles[shape])
featureVectors[shape].append(largestAngles[shape])
print(featureVectors)
And featureVectors prints out this:
[[4, 0.001743713493735165, 0.6497055601752815, 90.795723552739275],
[4, 0.0460937435599832, 0.19764217920409227, 90.204147248752378],
[1, 0.001185534503063044, 0.3034913722821194, 60.348908179729023],
[1, 0.015455289770298222, 0.8380914254332884, 109.02120657826231],
[3, 0.0169961646358455, 41.36919146079211, 136.83829993466398]]
However, there should be 7 shapes.
What I can't figure out is where to append blank values for the 2nd/3rd/4th feature of a circle so the program can continue running. It currently appears to be giving the 2nd/3rd/4th values from the next two shapes to the circles.
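One way to keep the indices aligned (a sketch, not tested against the original image) is to append a placeholder corner list for circles inside the contour loop, and then guard the statistics loop against those empty entries, appending blank values there:

# In the contour loop: keep the lists aligned even for circles.
if insideCircle:
    numberOfSides.append(1)
    corners.append([])  # placeholder: circles have no polygon corners
else:
    corner = cv2.approxPolyDP(contour, 0.01 * perim, True)
    corners.append(corner)
    numberOfSides.append(len(corner))

# In the statistics loop: append blank features for empty (circle) entries.
# (largestAngles would need the same kind of guard.)
for shape in range(0, len(sidesDividedByPerimeter)):
    if len(sidesDividedByPerimeter[shape]) < 2:
        standardDeviationsPerimeter.append(0.0)  # blank 2nd feature
        standardDeviationsAngles.append(0.0)     # blank 3rd feature
    else:
        standardDeviationsPerimeter.append(statistics.stdev(sidesDividedByPerimeter[shape]))
        standardDeviationsAngles.append(statistics.stdev(angles[shape]))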

Overlay a smaller image on a larger image - Python OpenCV

Hi, I am creating a program that replaces a face in an image with someone else's face. However, I am stuck on trying to insert the new face into the original, larger image. I have researched ROI and addWeighted (which needs the images to be the same size), but I haven't found a way to do this in Python. Any advice is great. I am new to OpenCV.
I am using the following test images:
smaller_image:
larger_image:
Here is my code so far... a mixture of other samples:
import cv2
import cv2.cv as cv
import sys
import numpy

def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3, minSize=(10, 10), flags=cv.CV_HAAR_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:,2:] += rects[:,:2]
    return rects

def draw_rects(img, rects, color):
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

if __name__ == '__main__':
    if len(sys.argv) != 2:  ## Check for error in usage syntax
        print "Usage : python faces.py <image_file>"
    else:
        img = cv2.imread(sys.argv[1], cv2.CV_LOAD_IMAGE_COLOR)  ## Read image file
        if (img == None):
            print "Could not open or find the image"
        else:
            cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
            gray = cv2.cvtColor(img, cv.CV_BGR2GRAY)
            gray = cv2.equalizeHist(gray)
            rects = detect(gray, cascade)

            ## Extract face coordinates
            x1 = rects[0][3]
            y1 = rects[0][0]
            x2 = rects[0][4]
            y2 = rects[0][5]
            y = y2 - y1
            x = x2 - x1
            ## Extract face ROI
            faceROI = gray[x1:x2, y1:y2]

            ## Show face ROI
            cv2.imshow('Display face ROI', faceROI)
            small = cv2.imread("average_face.png", cv2.CV_LOAD_IMAGE_COLOR)
            print "here"
            small = cv2.resize(small, (x, y))
            cv2.namedWindow('Display image')  ## create window for display
            cv2.imshow('Display image', small)  ## Show image in the window
            print "size of image: ", img.shape  ## print size of image
            cv2.waitKey(1000)
A simple way to achieve what you want:
import cv2
s_img = cv2.imread("smaller_image.png")
l_img = cv2.imread("larger_image.jpg")
x_offset=y_offset=50
l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]] = s_img
Update
I suppose you want to take care of the alpha channel too. Here is a quick and dirty way of doing so:
s_img = cv2.imread("smaller_image.png", -1)
y1, y2 = y_offset, y_offset + s_img.shape[0]
x1, x2 = x_offset, x_offset + s_img.shape[1]
alpha_s = s_img[:, :, 3] / 255.0
alpha_l = 1.0 - alpha_s
for c in range(0, 3):
l_img[y1:y2, x1:x2, c] = (alpha_s * s_img[:, :, c] +
alpha_l * l_img[y1:y2, x1:x2, c])
Using @fireant's idea, I wrote up a function to handle overlays. This works well for any position argument (including negative positions).
def overlay_image_alpha(img, img_overlay, x, y, alpha_mask):
    """Overlay `img_overlay` onto `img` at (x, y) and blend using `alpha_mask`.

    `alpha_mask` must have same HxW as `img_overlay` and values in range [0, 1].
    """
    # Image ranges
    y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])
    x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])

    # Overlay ranges
    y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)
    x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)

    # Exit if nothing to do
    if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:
        return

    # Blend overlay within the determined ranges
    img_crop = img[y1:y2, x1:x2]
    img_overlay_crop = img_overlay[y1o:y2o, x1o:x2o]
    alpha = alpha_mask[y1o:y2o, x1o:x2o, np.newaxis]
    alpha_inv = 1.0 - alpha

    img_crop[:] = alpha * img_overlay_crop + alpha_inv * img_crop
Example usage:
import numpy as np
from PIL import Image
# Prepare inputs
x, y = 50, 0
img = np.array(Image.open("img_large.jpg"))
img_overlay_rgba = np.array(Image.open("img_small.png"))
# Perform blending
alpha_mask = img_overlay_rgba[:, :, 3] / 255.0
img_result = img[:, :, :3].copy()
img_overlay = img_overlay_rgba[:, :, :3]
overlay_image_alpha(img_result, img_overlay, x, y, alpha_mask)
# Save result
Image.fromarray(img_result).save("img_result.jpg")
Result:
If you encounter errors or unusual outputs, please ensure:
- img should not contain an alpha channel (e.g. if it is RGBA, convert to RGB first);
- img_overlay has the same number of channels as img.
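For example, dropping an alpha channel before the call is straightforward (a sketch; the file name is a placeholder):

import numpy as np
from PIL import Image

img_rgba = np.array(Image.open("img_large.png"))  # hypothetical RGBA input
img = img_rgba[:, :, :3].copy()  # keep only the RGB channels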
Based on fireant's excellent answer above, here is the alpha blending, but a bit more human-legible. You may need to swap 1.0-alpha and alpha depending on which direction you're merging (mine is swapped from fireant's answer). Here, the o*-prefixed variables index into s_img, and the b*-prefixed variables index into the background l_img:

for c in range(0, 3):
    alpha = s_img[oy:oy+height, ox:ox+width, 3] / 255.0
    color = s_img[oy:oy+height, ox:ox+width, c] * (1.0 - alpha)
    beta = l_img[by:by+height, bx:bx+width, c] * (alpha)

    l_img[by:by+height, bx:bx+width, c] = color + beta
Here it is:
def put4ChannelImageOn4ChannelImage(back, fore, x, y):
    rows, cols, channels = fore.shape
    trans_indices = fore[..., 3] != 0  # Where not transparent
    overlay_copy = back[y:y+rows, x:x+cols]
    overlay_copy[trans_indices] = fore[trans_indices]
    back[y:y+rows, x:x+cols] = overlay_copy

#test
background = np.zeros((1000, 1000, 4), np.uint8)
background[:] = (127, 127, 127, 1)
overlay = cv2.imread('imagee.png', cv2.IMREAD_UNCHANGED)
put4ChannelImageOn4ChannelImage(background, overlay, 5, 5)
A simple function that blits an image front onto an image back and returns the result. It works with both 3 and 4-channel images and deals with the alpha channel. Overlaps are handled as well.
The output image has the same size as back, but always 4 channels.
The output alpha channel is given by (u+v)/(1+uv) where u,v are the alpha channels of the front and back image and -1 <= u,v <= 1. Where there is no overlap with front, the alpha value from back is taken.
import cv2

def merge_image(back, front, x, y):
    # convert to rgba
    if back.shape[2] == 3:
        back = cv2.cvtColor(back, cv2.COLOR_BGR2BGRA)
    if front.shape[2] == 3:
        front = cv2.cvtColor(front, cv2.COLOR_BGR2BGRA)

    # crop the overlay from both images
    bh, bw = back.shape[:2]
    fh, fw = front.shape[:2]
    x1, x2 = max(x, 0), min(x+fw, bw)
    y1, y2 = max(y, 0), min(y+fh, bh)
    front_cropped = front[y1-y:y2-y, x1-x:x2-x]
    back_cropped = back[y1:y2, x1:x2]

    alpha_front = front_cropped[:, :, 3:4] / 255
    alpha_back = back_cropped[:, :, 3:4] / 255

    # replace an area in result with overlay
    result = back.copy()
    print(f'af: {alpha_front.shape}\nab: {alpha_back.shape}\nfront_cropped: {front_cropped.shape}\nback_cropped: {back_cropped.shape}')
    result[y1:y2, x1:x2, :3] = alpha_front * front_cropped[:, :, :3] + (1-alpha_front) * back_cropped[:, :, :3]
    result[y1:y2, x1:x2, 3:4] = (alpha_front + alpha_back) / (1 + alpha_front*alpha_back) * 255

    return result
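A usage example might look like this (file names and offsets are placeholders):

import cv2

back = cv2.imread("larger_image.jpg")  # 3- or 4-channel background
front = cv2.imread("smaller_image.png", cv2.IMREAD_UNCHANGED)  # keep alpha if present

# paste `front` with its top-left corner at (x=50, y=20) on `back`
result = merge_image(back, front, 50, 20)
cv2.imwrite("merged.png", result)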
To simply blend s_img with an alpha, I just use cv2.addWeighted before the line
l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]] = s_img
as follows:
s_img = cv2.addWeighted(l_img[y_offset:y_offset+s_img.shape[0], x_offset:x_offset+s_img.shape[1]], 0.5, s_img, 0.5, 0)
If, when attempting to write to the destination image using any of the answers above, you get the following error:
ValueError: assignment destination is read-only
then a quick potential fix is to set the WRITEABLE flag to true:
img.setflags(write=1)
A simple 4-on-4 pasting function that works:
def paste(background, foreground, pos=(0, 0)):
    # get position and crop pasting area if needed
    # (note: here x indexes rows and y indexes columns, since shape[0] is the row count)
    x = pos[0]
    y = pos[1]
    bgWidth = background.shape[0]
    bgHeight = background.shape[1]
    frWidth = foreground.shape[0]
    frHeight = foreground.shape[1]
    width = bgWidth - x
    height = bgHeight - y
    if frWidth < width:
        width = frWidth
    if frHeight < height:
        height = frHeight

    # normalize alpha channels from 0-255 to 0-1
    alpha_background = background[x:x+width, y:y+height, 3] / 255.0
    alpha_foreground = foreground[:width, :height, 3] / 255.0

    # set adjusted colors
    for color in range(0, 3):
        fr = alpha_foreground * foreground[:width, :height, color]
        bg = alpha_background * background[x:x+width, y:y+height, color] * (1 - alpha_foreground)
        background[x:x+width, y:y+height, color] = fr + bg

    # set adjusted alpha and denormalize back to 0-255
    background[x:x+width, y:y+height, 3] = (1 - (1 - alpha_foreground) * (1 - alpha_background)) * 255
    return background
I reworked @fireant's concept to allow for optional alpha masks and allow any x or y, including values outside of the bounds of the image. It will crop to the bounds.
def overlay_image_alpha(img, img_overlay, x, y, alpha_mask=None):
    """Overlay `img_overlay` onto `img` at (x, y) and blend using optional `alpha_mask`.

    `alpha_mask` must have same HxW as `img_overlay` and values in range [0, 1].
    """
    if y < 0 or y + img_overlay.shape[0] > img.shape[0] or x < 0 or x + img_overlay.shape[1] > img.shape[1]:
        y_origin = 0 if y > 0 else -y
        y_end = img_overlay.shape[0] if y < 0 else min(img.shape[0] - y, img_overlay.shape[0])

        x_origin = 0 if x > 0 else -x
        x_end = img_overlay.shape[1] if x < 0 else min(img.shape[1] - x, img_overlay.shape[1])

        img_overlay_crop = img_overlay[y_origin:y_end, x_origin:x_end]
        alpha = alpha_mask[y_origin:y_end, x_origin:x_end] if alpha_mask is not None else None
    else:
        img_overlay_crop = img_overlay
        alpha = alpha_mask

    y1 = max(y, 0)
    y2 = min(img.shape[0], y1 + img_overlay_crop.shape[0])
    x1 = max(x, 0)
    x2 = min(img.shape[1], x1 + img_overlay_crop.shape[1])

    img_crop = img[y1:y2, x1:x2]
    img_crop[:] = alpha * img_overlay_crop + (1.0 - alpha) * img_crop if alpha is not None else img_overlay_crop
