I am doing text detection using OCR, and in my program, if a photo contains multiple pieces of text, it produces multiple bounding boxes. I was wondering if there is a way to combine all the boxes and produce a single cropped output image of the text. PS: I'm using the EAST deep learning text detector. The problem is that it detects the text in the image, but if the pieces of text are a little further apart it creates 2 or 3 crops from the original image, and I am looking for a way to combine these 2-3 crops into 1.
(newW, newH) = (args["width"], args["height"])
rW = W / float(newW)
rH = H / float(newH)
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
layerNames = [
    "feature_fusion/Conv_7/Sigmoid",
    "feature_fusion/concat_3"]
print("[INFO] loading EAST text detector...")
net = cv2.dnn.readNet(args["east"])
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
                             (123.68, 116.78, 103.94), swapRB=True,
                             crop=False)
start = time.time()
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
end = time.time()
print("[INFO] text detection took {:.6f} seconds".format(end - start))
(numRows, numCols) = scores.shape[2:4]
rects = []
confidences = []
for y in range(0, numRows):
    scoresData = scores[0, 0, y]
    xData0 = geometry[0, 0, y]
    xData1 = geometry[0, 1, y]
    xData2 = geometry[0, 2, y]
    xData3 = geometry[0, 3, y]
    anglesData = geometry[0, 4, y]
    for x in range(0, numCols):
        if scoresData[x] < args["min_confidence"]:
            continue
        (offsetX, offsetY) = (x * 4.0, y * 4.0)
        angle = anglesData[x]
        cos = np.cos(angle)
        sin = np.sin(angle)
        h = xData0[x] + xData2[x]
        w = xData1[x] + xData3[x]
        endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
        endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
        startX = int(endX - w)
        startY = int(endY - h)
        rects.append((startX, startY, endX, endY))
        confidences.append(scoresData[x])
boxes = non_max_suppression(np.array(rects), probs=confidences)
for number, (startX, startY, endX, endY) in enumerate(boxes):
    startX = int(startX * rW)
    startY = int(startY * rH)
    endX = int(endX * rW)
    endY = int(endY * rH)
    Final = orig[startY:endY, startX:endX]
    cv2.imshow("Text Detection", Final)
    cv2.waitKey(0)
    cv2.imwrite("crop{}.jpg".format(number), Final)
I am using OpenCV and a trained model, and I am trying to assign IDs to the cars with the help of their centroids. There are the following scenarios:
at the beginning, all recognized cars get a centroid and an ID
cars that leave the frame should have their IDs removed
new cars that enter the frame later should get new IDs
I have the ID code from a YouTube video, but it doesn't work as it should: the remove step removes everything.
If I leave the update step on, the cars get new IDs every frame. That should not happen.
I am new to programming and would appreciate it if someone could help me out here.
import cv2
import numpy as np
import pandas as pd
import math
video_name = "videos.mp4"
cap = cv2.VideoCapture(video_name)
net = cv2.dnn.readNetFromONNX("best.onnx")
classes = ['car', 'free_space']
count = 0
center_points_prev_frame = []
tracking_objects = {}
track_id = 0
while True:
    ret, img = cap.read()
    # ret, frame1 = cap.read()
    count += 1
    if img is None:
        break

    img = cv2.resize(img, (1500, 1000))
    # frame1 = cv2.resize(frame1, (1500, 1000))

    blob = cv2.dnn.blobFromImage(img, scalefactor=1 / 255,
                                 size=(640, 640),
                                 mean=[0, 0, 0, 0],
                                 swapRB=True,
                                 crop=False)
    net.setInput(blob)
    detections = net.forward()[0]
    # print(detections.shape)
    # Layout: cx, cy, w, h, confidence, class_score
    classes_ids = []
    confidences = []
    boxes = []
    rows = detections.shape[0]
    img_width, img_height = img.shape[1], img.shape[0]
    x_scale = img_width / 640
    y_scale = img_height / 640

    # apply Non-Maximum Suppression
    for i in range(rows):
        row = detections[i]
        confidence = row[4]
        if confidence > 0.3:
            classes_score = row[5:]
            ind = np.argmax(classes_score)
            if classes_score[ind] > 0.3:
                classes_ids.append(ind)
                confidences.append(confidence)
                cx, cy, w, h = row[:4]
                x1 = int((cx - w / 2) * x_scale)
                y1 = int((cy - h / 2) * y_scale)
                # print("X1:", x1, "Y1", y1)
                width = int(w * x_scale)
                height = int(h * y_scale)
                box = np.array([x1, y1, width, height])
                boxes.append(box)

    indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.3, 0.3)

    # Point current frame
    center_points_cur_frame = []
    for i in indices:
        x1, y1, w, h = boxes[i]
        label = classes[classes_ids[i]]
        conf = confidences[i]
        text = label + "{:.2f}".format(conf)
        if label == 'car':
            car_coordinates = [(x1, y1), (x1 + w, y1 + h)]
            # cv2.rectangle(img, (x1, y1), (x1 + w, y1 + h), (51, 51, 255), 2)
            # center points
            cx = int((x1 + x1 + w) / 2)
            cy = int((y1 + y1 + h) / 2)
            cv2.circle(img, (cx, cy), 3, (255, 0, 255), -1)
            cv2.putText(img, str(track_id), (cx, cy), cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 0, 255), 1)
            center_points_cur_frame.append((cx, cy))

    # Only at the beginning we compare previous and current frame
    if count <= 2:
        for pt in center_points_cur_frame:
            for pt2 in center_points_prev_frame:
                distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])
                if distance < 20:
                    tracking_objects[track_id] = pt
                    track_id += 1
    else:
        tracking_objects_copy = tracking_objects.copy()
        center_points_cur_frame_copy = center_points_cur_frame.copy()
        for object_id, pt2 in tracking_objects_copy.items():
            object_exists = False
            for pt in center_points_cur_frame_copy:
                distance = math.hypot(pt2[0] - pt[0], pt2[1] - pt[1])
                # Update IDs position
                if distance < 20:
                    tracking_objects[object_id] = pt
                    object_exists = True
                    if pt in center_points_cur_frame:
                        center_points_cur_frame.remove(pt)
                    continue

            ############################### Problem ##########################################
            # Remove IDs lost
            if not object_exists:
                tracking_objects.pop(object_id)

        # Add new IDs found
        for pt in center_points_cur_frame:
            tracking_objects[track_id] = pt
            track_id += 1
        ############################### Problem ##########################################

    for object_id, pt in tracking_objects.items():
        cv2.circle(img, pt, 3, (255, 0, 255), -1)
        cv2.putText(img, str(object_id), (pt[0], pt[1] - 2), cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 0, 255), 1)

    print("Tracking objects")
    print(tracking_objects)

    print("CUR FRAME LEFT PTS")
    print(center_points_cur_frame)

    # Make a copy of the points
    center_points_prev_frame = center_points_cur_frame.copy()

    cv2.imshow("Video", img)
    cv2.waitKey(1)

# After the loop release the cap object
cap.release()
# Destroy all the windows
cv2.destroyAllWindows()
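For reference, one common way to structure the matching so that only lost IDs are removed and each new centroid gets exactly one new ID is to match every existing ID against the current centroids first and only then handle the leftovers. This is a minimal sketch under those assumptions, not the exact code from the video; it assumes tracking_objects is a dict of id -> (cx, cy), center_points_cur_frame is the list built above, and math is imported:

def update_tracks(tracking_objects, center_points_cur_frame, track_id, max_dist=20):
    # Match each known ID to the nearest unclaimed centroid of the current frame.
    unmatched = list(center_points_cur_frame)
    for object_id, prev_pt in list(tracking_objects.items()):
        best_pt, best_dist = None, max_dist
        for pt in unmatched:
            d = math.hypot(prev_pt[0] - pt[0], prev_pt[1] - pt[1])
            if d < best_dist:
                best_pt, best_dist = pt, d
        if best_pt is not None:
            tracking_objects[object_id] = best_pt   # update the position of this ID
            unmatched.remove(best_pt)               # a centroid can only serve one ID
        else:
            tracking_objects.pop(object_id)         # remove only the IDs that really left
    for pt in unmatched:                            # whatever is left is a new car
        tracking_objects[track_id] = pt
        track_id += 1
    return track_id

Called once per frame as track_id = update_tracks(tracking_objects, center_points_cur_frame, track_id), this keeps existing IDs stable, drops only the ones with no nearby centroid, and hands out new IDs only to unmatched centroids.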
I have the following code which I am trying to use to detect objects. I have made some changes, and now I am trying to make it work so that it uses a camera to detect objects. What it does now is take a picture in a loop and does not use video.
Here is my code.
import numpy as np
import cv2
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
classes = []
with open("coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
layers_names = net.getLayerNames()
outputlayers = [layers_names[i - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
class_ids = []
confidences = []
boxes = []
cap = cv2.VideoCapture(0)
while True:
    _, image = cap.read()
    height, width, channels = image.shape
    blob = cv2.dnn.blobFromImage(image, 0.00392, (416, 416), (0, 0, 0), True,
                                 crop=False)
    net.setInput(blob)
    outs = net.forward(outputlayers)
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence >= 0.5:
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[3] * width)
                h = int(detection[3] * height)
                x = int(center_x - width / 2)
                y = int(center_y - height / 2)
                boxes.append([x, y, width, height])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    objects_detecetd = len(boxes)
    font = cv2.FONT_HERSHEY_PLAIN
    for i in range(len(boxes)):
        if i in indexes:
            x, y, width, height = boxes[i]
            label = str(classes[class_ids[i]])
            color = colors[i]
            print(label)
            cv2.rectangle(image, (x, y), (x + width, y + height), color, 2)
            cv2.putText(image, label, (x, y + 30), font, 4, color, 3)
    cv2.imshow("Image", image)
    cv2.waitKey(5)
cv2.destroyAllWindows()
cv2.destroyAllWindows()
cap.release()
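For reference, in a webcam loop the detection lists are usually reset at the top of every iteration (otherwise boxes accumulate across frames), and the YOLO output row is (center_x, center_y, w, h, objectness, class scores...), so the box width and height come from indices 2 and 3. A minimal sketch of that per-frame decoding step, using the same variable names as above:

# inside the while loop, once per captured frame:
class_ids, confidences, boxes = [], [], []       # reset for this frame only
for out in outs:
    for detection in out:
        scores = detection[5:]
        class_id = int(np.argmax(scores))
        confidence = scores[class_id]
        if confidence >= 0.5:
            center_x = int(detection[0] * width)
            center_y = int(detection[1] * height)
            w = int(detection[2] * width)        # index 2 = box width
            h = int(detection[3] * height)       # index 3 = box height
            x = int(center_x - w / 2)            # offset by half the box, not half the image
            y = int(center_y - h / 2)
            boxes.append([x, y, w, h])
            confidences.append(float(confidence))
            class_ids.append(class_id)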
I'm trying to detect the ROI with the most black pixels for the license plate.
Below is the code for the number plate. It is based on the question How to recognize vehicle license / number plate (ANPR) from an image?.
I modified it a bit.
import cv2
import numpy as np
import imutils
import sys
import glob
import math
import time
import os
def validate_contour(contour, img, aspect_ratio_range, area_range):
    rect = cv2.minAreaRect(contour)
    img_width = img.shape[1]
    img_height = img.shape[0]
    box = cv2.boxPoints(rect)
    box = np.int0(box)

    X = rect[0][0]
    Y = rect[0][1]
    angle = rect[2]
    width = rect[1][0]
    height = rect[1][1]

    angle = (angle + 180) if width < height else (angle + 90)

    output = False
    if (width > 0 and height > 0) and ((width < img_width / 2.0) and (height < img_width / 2.0)):
        aspect_ratio = float(width) / height if width > height else float(height) / width
        if (aspect_ratio >= aspect_ratio_range[0] and aspect_ratio <= aspect_ratio_range[1]):
            if ((height * width > area_range[0]) and (height * width < area_range[1])):
                box_copy = list(box)
                point = box_copy[0]
                del (box_copy[0])
                dists = [((p[0] - point[0]) ** 2 + (p[1] - point[1]) ** 2) for p in box_copy]
                sorted_dists = sorted(dists)
                opposite_point = box_copy[dists.index(sorted_dists[1])]
                tmp_angle = 90

                if abs(point[0] - opposite_point[0]) > 0:
                    tmp_angle = abs(float(point[1] - opposite_point[1])) / abs(point[0] - opposite_point[0])
                    tmp_angle = rad_to_deg(math.atan(tmp_angle))

                if tmp_angle <= 45:
                    output = True
    return output


def deg_to_rad(angle):
    return angle * np.pi / 180.0


def rad_to_deg(angle):
    return angle * 180 / np.pi


def enhance(img):
    kernel = np.array([[-1, 0, 1], [-2, 0, 2], [1, 0, 1]])
    return cv2.filter2D(img, -1, kernel)
img = cv2.imread('13.jpg')
input_image = imutils.resize(img, width=500)
raw_image = np.copy(input_image)
img_original = input_image.copy()
img_mask = input_image.copy()
lic_plate = input_image.copy()
contoured = input_image.copy()
gray = cv2.cvtColor(img_original, cv2.COLOR_BGR2GRAY)
gray = enhance(gray)
gray = cv2.GaussianBlur(gray, (5, 5), 0)
gray = cv2.Sobel(gray, -1, 1, 0)
h, sobel = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
se = cv2.getStructuringElement(cv2.MORPH_RECT, (16, 4))
binary = cv2.morphologyEx(sobel, cv2.MORPH_CLOSE, se)
ed_img = np.copy(binary)
contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for contour in contours:
    aspect_ratio_range = (2.2, 12)  # minimum 2.2, maximum 12
    area_range = (500, 18000)

    rectangles = cv2.minAreaRect(contour)  # rect = ((center_x, center_y), (width, height), angle)
    boxes = cv2.boxPoints(rectangles)      # find the four vertices of the rectangle from the rect above
    boxes = np.int0(boxes)                 # round the values and make them integers
    # print(box)
    all_area = cv2.drawContours(contoured, [boxes], 0, (127, 0, 255), 2)

    if validate_contour(contour, binary, aspect_ratio_range, area_range):
        rect = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        Xs = [i[0] for i in box]
        Ys = [i[1] for i in box]
        x1 = min(Xs)
        x2 = max(Xs)
        y1 = min(Ys)
        y2 = max(Ys)

        angle = rect[2]
        if angle < -45:
            angle += 90

        W = rect[1][0]
        H = rect[1][1]
        aspect_ratio = float(W) / H if W > H else float(H) / W

        center = ((x1 + x2) / 2, (y1 + y2) / 2)
        size = (x2 - x1, y2 - y1)
        M = cv2.getRotationMatrix2D((size[0] / 2, size[1] / 2), angle, 1.0)
        tmp = cv2.getRectSubPix(ed_img, size, center)
        tmp = cv2.warpAffine(tmp, M, size)
        TmpW = H if H > W else W
        TmpH = H if H < W else W
        tmp = cv2.getRectSubPix(tmp, (int(TmpW), int(TmpH)), (size[0] / 2, size[1] / 2))
        __, tmp = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        sortedplate = cv2.drawContours(img_mask, [box], 0, (127, 0, 255), 2)

        white_pixels = 0
        for x in range(tmp.shape[0]):
            for y in range(tmp.shape[1]):
                if tmp[x][y] == 255:
                    white_pixels += 1

        edge_density = float(white_pixels) / (tmp.shape[0] * tmp.shape[1])

        tmp = cv2.getRectSubPix(raw_image, size, center)
        tmp = cv2.warpAffine(tmp, M, size)
        TmpW = H if H > W else W
        TmpH = H if H < W else W
        tmp = cv2.getRectSubPix(tmp, (int(TmpW), int(TmpH)), (size[0] / 2, size[1] / 2))
        # getRectSubPix retrieves a pixel rectangle from an image with sub-pixel accuracy

        if edge_density > 0.5:
            cv2.drawContours(input_image, [box], 0, (127, 0, 255), 2)

cv2.imshow('original', img_original)
cv2.imshow('sobel', sobel)
cv2.imshow('binary', binary)
cv2.imshow("all contours", all_area)
cv2.imshow("sorted", sortedplate)
cv2.imshow("detected", lic_plate)
cv2.waitKey(0)
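Since the goal is the ROI with the most black pixels, one option (a sketch, not part of the original code) is to count the zero-valued pixels of each thresholded candidate crop with cv2.countNonZero and remember the best candidate:

def count_black_pixels(binary_roi):
    # In a binary (Otsu-thresholded) crop, black pixels are the zero-valued ones.
    return binary_roi.size - cv2.countNonZero(binary_roi)

Initialising best_blacks, best_box = -1, None before the contour loop, calling blacks = count_black_pixels(tmp) for each validated candidate, and keeping the box with the largest count would then select the ROI with the most black pixels.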
(Image: the number plate that needs to be detected.)
(Example images: lp1, lp2, lp3, lp4.)
I'm trying to use Pillow's ImageGrab.grab to capture frames of my screen, so I can later detect objects in every frame.
import numpy as np
import cv2
from PIL import ImageGrab as ig
import time
# Load Yolo
net = cv2.dnn.readNet("yolov3_training_last.weights", "yolov3_testing.cfg")
# Name custom object
classes = ["amongus"]
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))
last_time = time.time()
while True:
    img_ = ig.grab(bbox=None)
    open_cv_image = np.array(img_)
    open_cv_image = open_cv_image[:, :, ::-1].copy()
    img = cv2.resize(open_cv_image, None, fx=0.4, fy=0.4)
    height, width, channels = img.shape

    # Detecting objects
    blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    # Showing informations on the screen
    class_ids = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.3:
                # Object detected
                print(class_id)
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                # Rectangle coordinates
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)

    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    print(indexes)
    font = cv2.FONT_HERSHEY_PLAIN
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            color = colors[class_ids[i]]
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            # cv2.putText(img, label, (x, y + 30), font, 3, color, 2)

    cv2.imshow("Image", np.array(img_))
    # key = cv2.waitKey(0)

cv2.destroyAllWindows()
If I uncomment key = cv2.waitKey(0) it works, because I'm taking just one frame each time I press a key, but once I comment that part out I get this screen.
I'm assuming (not at all sure that this is the reason) that this is happening because a lot of frames are coming in, but if I sleep(n) inside the while loop the FPS will be very low (I guess?).
What's happening, and how can I fix it?
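For what it's worth, cv2.imshow only redraws its window while cv2.waitKey is being pumped, so the usual pattern is a non-blocking 1 ms wait at the end of each iteration rather than waitKey(0) or sleeping. A minimal sketch of the loop tail, assuming the same names as above:

    cv2.imshow("Image", img)                  # show the resized frame the boxes were drawn on
    if cv2.waitKey(1) & 0xFF == ord('q'):     # 1 ms wait: refreshes the window without blocking
        break

cv2.destroyAllWindows()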
I have an image that has to be cropped around a bounding box and resized to 256x256. In my original image there are a number of points (x, y) that lie inside the bounding box.
This is my original image with my original coordinates marked:
Here's the cropped result, where the red points are the correct (x, y) and the blue ones are my current result:
Here's how I'm doing it:
import numpy as np
import cv2
def scaleBB(bb, scale):
    centerX = (bb[0][0] + bb[1][0]) / 2
    centerY = (bb[0][1] + bb[2][1]) / 2
    center = (centerX, centerY)
    scl_center = (centerX * scale[0], centerY * scale[1])
    p1 = scale * (bb[0] - center) + scl_center
    p2 = scale * (bb[1] - center) + scl_center
    p3 = scale * (bb[2] - center) + scl_center
    p4 = scale * (bb[3] - center) + scl_center
    return np.array([p1, p2, p3, p4])


def expandBB(scaledBB, size):
    bbw = np.abs(scaledBB[0][0] - scaledBB[1][0])
    bbh = np.abs(scaledBB[0][1] - scaledBB[2][1])
    expandX = (size[0] - bbw) / 2
    expandY = (size[1] - bbh) / 2
    p1 = scaledBB[0] + (-expandX, -expandY)
    p2 = scaledBB[1] + (+expandX, -expandY)
    p3 = scaledBB[2] + (+expandX, +expandY)
    p4 = scaledBB[3] + (+expandX, +expandY)
    return np.array([p1, p2, p3, p4])


def recalculate_joints_points(oldX, oldY, newX, newY, joints):
    R_x = newX / oldX
    R_y = newY / oldY
    new_joints = []
    for index, joint in enumerate(joints):
        x = joint[0]
        y = joint[1]
        n_x = round(R_x * x)
        n_y = round(R_y * y)
        print(R_x, R_y, x, y, n_x, n_y)
        new_joints.append([n_x, n_y])
    return np.array(new_joints)
def cropAndResizeImage(label, bb):
    img_path = "original.jpg"
    # downscale
    image = cv2.imread(img_path)
    # orgSize = image.shape[:2]
    label = label
    bb = bb
    print(bb)
    dim = int(256 / 2)
    # define the target height of the bounding box
    targetHeight = 200.0
    w = np.abs(bb[0][0] - bb[1][0])
    h = np.abs(bb[0][1] - bb[2][1])
    targetScale = targetHeight / h
    print(targetScale)
    scaledImage = cv2.resize(image, (0, 0), fx=targetScale, fy=targetScale)
    scaledBB = scaleBB(bb, (targetScale, targetScale))
    cropRegion = expandBB(scaledBB, (256, 256))
    print(scaledBB)
    print(cropRegion)
    startX = int(cropRegion[0][0] + dim)
    startY = int(cropRegion[0][1] + dim)
    endX = startX + 256  # cropRegion[2][0] + dim
    endY = startY + 256  # cropRegion[2][1] + dim
    print(startX, startY, endX, endY)
    padded_image = np.pad(scaledImage, ((dim, dim), (dim, dim), (0, 0)), mode='constant')
    croppedImage = padded_image[startY:endY, startX:endX]
    # new label
    print(image.shape, croppedImage.shape)
    oldWidth = image.shape[1]
    oldHeight = image.shape[0]
    newWidth = 256 + dim
    newHeight = 256 + dim
    out_label = recalculate_joints_points(oldWidth, oldHeight, newWidth, newHeight, label)
    return [croppedImage, out_label]
def main():
    labels = np.array([[1214, 598],
                       [1169, 424],
                       [1238, 273],
                       [1267, 285],
                       [1212, 453],
                       [1229, 622],
                       [1253, 279],
                       [1173, 114],
                       [1171, 113],
                       [1050, 60],
                       [1106, 143],
                       [1140, 100],
                       [1169, 80],
                       [1176, 148],
                       [1152, 280],
                       [1087, 391]])
    bb = np.array([[1050, 60],
                   [1267, 60],
                   [1267, 622],
                   [1050, 622]])
    img, label = cropAndResizeImage(labels, bb)
    for point in label:
        print(point)
        x, y = point
        cv2.circle(img, (int(x), int(y)), 5, (255, 0, 0), -11)
    cv2.imshow("cropped", img)
    cv2.waitKey()


if __name__ == '__main__':
    main()
As far as I understand, to get the new (x, y) you have to calculate the ratio (the scale factor between the old and the new size), but it still seems off. Any help is appreciated.
EDIT 1:
Using just 256 as newHeight/newWidth produces this image:
EDIT 2:
Using the solution from @ChrisH it's almost perfect, but still a little bit off:
Here is a function that will translate directly from the original coordinates into the cropped and scaled coordinates. You can skip all the other functions and transform the points directly with this:
def getNewCoords(x, y):
    bbUpperLeftX = bb[0][0]
    bbUpperLeftY = bb[0][1]
    bbLowerRightX = bb[2][0]
    bbLowerRightY = bb[2][1]

    sizeX = bbLowerRightX - bbUpperLeftX
    sizeY = bbLowerRightY - bbUpperLeftY
    sizeMax = max(sizeX, sizeY)

    centerX = (bbLowerRightX + bbUpperLeftX) / 2
    centerY = (bbLowerRightY + bbUpperLeftY) / 2

    offsetX = (centerX - sizeMax / 2) * 256 / sizeMax
    offsetY = (centerY - sizeMax / 2) * 256 / sizeMax

    x = x * 256 / sizeMax - offsetX
    y = y * 256 / sizeMax - offsetY
    return (x, y)
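Applied to the question's data, the usage would look something like this (a sketch, assuming bb, labels and the cropped img from the code above are in scope):

new_points = [getNewCoords(x, y) for (x, y) in labels]
for nx, ny in new_points:
    cv2.circle(img, (int(nx), int(ny)), 5, (0, 0, 255), -1)
cv2.imshow("cropped", img)
cv2.waitKey()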
Since you define
endX = startX + 256
endY = startY + 256
And make the output image as
croppedImage = padded_image[startY:endY, startX:endX]
Shouldn't the new width and height be 256? Instead you define them as
newWidth = 256 + dim
newHeight = 256 + dim
I think dim is unnecessary here
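In other words, the suggested change inside cropAndResizeImage would be something like:

newWidth = 256   # the crop is exactly 256x256, so no extra dim
newHeight = 256
out_label = recalculate_joints_points(oldWidth, oldHeight, newWidth, newHeight, label)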
You can use augmentit to do the task.
pip install augmentit
Documentation: https://github.com/sandesha-hegde/augmentit