I've resized my image to newX, newY. Prior to resizing I had a point (x,y). Now that I've resized my image I'd like to know where the point is on the new image. Sounds simple but I'm bad at math. Any ideas?
It is just a matter of ratios:
On the x-axis, you have resized by a ratio Rx = newX/oldX, and by a ratio Ry = newY/oldY on the y-axis.
Therefore, your new coordinates for point (x,y) are (Rx * x, Ry * y).
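For example, a quick sanity check in Python (the sizes and point here are made up):

old_w, old_h = 800, 600            # original image size (hypothetical)
new_w, new_h = 400, 300            # resized image size (hypothetical)
x, y = 200, 150                    # point on the original image

rx = new_w / old_w                 # Rx = 0.5
ry = new_h / old_h                 # Ry = 0.5
new_x, new_y = rx * x, ry * y      # (100.0, 75.0) on the resized image
print(new_x, new_y)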
from heatmappy import Heatmapper
from PIL import Image, ImageFont, ImageDraw
import cv2
import numpy as np
def generate_heatmap_data_list(coord_list):
    # Each entry of coord_list is [top_left, bottom_right, weight]: expand
    # the box into point samples, keeping `weight` points out of every 250
    # so a higher weight produces a denser (hotter) region in the heatmap.
    cumulative_data_list = []
    for lsingledata in coord_list:
        temp = []
        for x in range(lsingledata[0][0], lsingledata[1][0]):
            for y in range(lsingledata[0][1], lsingledata[1][1]):
                temp.append([x, y])
        data = [temp[i] for i in range(len(temp)) if (i % 250) < lsingledata[2]]
        cumulative_data_list += data
    return cumulative_data_list
coord = [[[774, 265], [909, 409], 1], [[985, 809], [1139, 992], 5],
         [[514, 842], [803, 1024], 10], [[127, 629], [283, 869], 20],
         [[258, 442], [429, 584], 30], [[827, 851], [980, 1033], 40],
         [[343, 611], [514, 753], 1], [[500, 358], [595, 409], 50],
         [[163, 879], [334, 999], 15]]
data = generate_heatmap_data_list(coord)
example_img_path = r"C:\Workarea\heatmap\code_testing_pyheatmap\blue_print.jpg"
example_img = Image.open(example_img_path)
print("###", type(example_img))
width, height = example_img.size
original_dim = (width,height)
# Resize dimensions at which to plot the heatmap.
resize_dim = (1237,1036)
example_img = example_img.resize(resize_dim)
new_point_list = []
for lsingle_point in data:
    # Scale each point by the ratio of new size to old size.
    x1 = int(lsingle_point[0] * (resize_dim[0] / original_dim[0]))
    y1 = int(lsingle_point[1] * (resize_dim[1] / original_dim[1]))
    new_point_list.append([x1, y1])
heatmapper = Heatmapper()
heatmap = heatmapper.heatmap_on_img(new_point_list, example_img)
print("PIL_type: ", type(heatmap))
heatmap.save('temp.png')
# If you want to draw the percentage labels on the image:
img = cv2.imread('temp.png')
print("cv2_type:", type(img))
img = cv2.putText(img, '1%', (803,341), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##FRAGRANCE
img = cv2.putText(img, '5%', (1027,919), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##COSMETICS
img = cv2.putText(img, '10%', (661,977), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##HONEY
img = cv2.putText(img, '20%', (209,765), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##AJILE
img = cv2.putText(img, '30%', (337,539), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##ANNABELLE
img = cv2.putText(img, '40%', (909,953), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##SUNGLASSES
img = cv2.putText(img, '1%', (423,707), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##VANHEUSEN
img = cv2.putText(img, '50%', (539,405), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##JALLUS
img = cv2.putText(img, '15%', (231,961), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##DENIM
cv2.imwrite("put_text_03_01_2022_heatmap.jpg", img)
As I learned from tutorial videos, I tried this code:
import numpy as np
import cv2
ARUCO_DICT = {
    "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
    "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
    "DICT_4X4_250": cv2.aruco.DICT_4X4_250,  # included so the lookup below doesn't KeyError
}
# In fact there are more dictionary keys than written above; I deleted them to shorten the question.
def aruco_display(corners, ids, rejected, image):
    if len(corners) > 0:
        ids = ids.flatten()
        for (markerCorner, markerID) in zip(corners, ids):
            corners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = corners
            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))
            # Draw the marker outline.
            cv2.line(image, topLeft, topRight, (0, 255, 0), 2)
            cv2.line(image, topRight, bottomRight, (0, 255, 0), 2)
            cv2.line(image, bottomRight, bottomLeft, (0, 255, 0), 2)
            cv2.line(image, bottomLeft, topLeft, (0, 255, 0), 2)
            # Draw the marker centre and its ID.
            cX = int((topLeft[0] + bottomRight[0]) / 2.0)
            cY = int((topLeft[1] + bottomRight[1]) / 2.0)
            cv2.circle(image, (cX, cY), 4, (0, 0, 255), -1)
            cv2.putText(image, str(markerID), (topLeft[0], topLeft[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            print("[Inference] ArUco marker ID: {}".format(markerID))
    return image
img = cv2.imread('markers.jpg', 1)
# The filename will change according to the name of the photo.
aruco_type = ["DICT_4X4_50",
"DICT_4X4_100",
"DICT_4X4_250",
]
for i in aruco_type:
arucoDict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[i])
arucoParams = cv2.aruco.DetectorParameters()
corners, ids, rejected = cv2.aruco.ArucoDetector(arucoDict, arucoParams).detectMarkers(img)
detected_markers = aruco_display(corners, ids, rejected, img)
cv2.imshow("Image", detected_markers)
cv2.waitKey(0)
cv2.destroyAllWindows()
This code detects most of the markers, but it still fails to detect some ArUco markers, like these:
ArUco1
ArUco2
How can I solve this issue? If the code can detect some of them, I don't understand why it cannot detect the other ArUco markers in the same image.
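If it helps to experiment, detection sensitivity can be tuned through the detector parameters before building the ArucoDetector. A minimal sketch assuming the same OpenCV 4.7+ ArUco API used above; the values are illustrative starting points, not tested on your image:

import cv2

arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
params = cv2.aruco.DetectorParameters()
# Sweep a wider range of adaptive-threshold window sizes so markers at
# different scales and lighting conditions survive binarization.
params.adaptiveThreshWinSizeMin = 3
params.adaptiveThreshWinSizeMax = 53
params.adaptiveThreshWinSizeStep = 4
# Accept smaller markers relative to the image size.
params.minMarkerPerimeterRate = 0.01

detector = cv2.aruco.ArucoDetector(arucoDict, params)
img = cv2.imread('markers.jpg')
corners, ids, rejected = detector.detectMarkers(img)
print(ids)

Markers that land in rejected (candidates found but not decoded) are usually the ones worth inspecting while tuning these parameters.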
import math
import pickle
import glob
import csv
import cv2
import dlib
import imutils
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from imutils import face_utils
from scipy import interp
from sklearn import datasets
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
# from sklearn.svm import LinearSVC
from sklearn.svm import SVC
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("D:\\study folder\\msc project SVM\\Emotion-Recognition-From-Facial-Expressions-master\\shape_predictor_68_face_landmarks.dat") #Or set this to whatever you named the downloaded file
clf = OneVsRestClassifier(SVC(kernel='linear', probability=True, tol=1e-3))
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
def get_landmark_positions(img):
    detections = detector(img, 1)
    for k, d in enumerate(detections):  # For all detected face instances individually
        shape = predictor(img, d)  # Draw facial landmarks with the predictor class
        shape2 = face_utils.shape_to_np(shape)
        ch = cv2.convexHull(shape2[48:68])
        M = cv2.moments(shape2[48:68])
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        sum = 0
        for p in ch:
            i, j = p[0]
            v = (j - cY) / (i - cX)
            if ((i - cX) != 0):
                sum = sum + v
        xlist = []
        ylist = []
        (x, y, w, h) = cv2.boundingRect(np.array([shape2[48:68]]))
        roi = img[y:y + h, x:x + w]
        win_size = (64, 128)
        img = cv2.resize(img, win_size)
        # img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        d = cv2.HOGDescriptor()
        hog = d.compute(img)
        hog = hog.transpose()[0]
        hog = np.asarray(hog)
        for i in range(1, 68):  # Store X and Y coordinates in two lists
            # if (i >= 49 and i <= 68):
            #     print(shape.part(i))
            xlist.append(float(shape.part(i).x))
            ylist.append(float(shape.part(i).y))
    return xlist, ylist, hog, sum
def get_features(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    clahe_image = clahe.apply(gray)
    xlist, ylist, hog, sum = get_landmark_positions(clahe_image)
    features2 = []
    features2.extend(hog)
    # NOTE: return added here as an assumption; the snippet as posted ended
    # without one, yet `feat = get_features(...)` below needs the features.
    return features2
cap = cv2.VideoCapture(0)
filename = 'D:\\study folder\\msc project SVM\\Emotion-Recognition-From-Facial-Expressions-master\\finalized_model.sav'
face_cascade = cv2.CascadeClassifier('D:\\study folder\\msc project SVM\\Emotion-Recognition-From-Facial-Expressions-master\\haarcascade_frontalface_default.xml')
clf = pickle.load(open(filename, 'rb'))
classes = ["HAPPY", "CONTEMPT", "ANGER", "DISGUST", "FEAR", "SADNESS", "SURPRISE", "NEUTRAL"]
while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
    crop_img = frame
    if len(faces) == 0:
        crop_img = frame
    else:
        crop_img = frame[y:y + h, x:x + w]
    win_size = (64, 128)
    feat = get_features(crop_img)
    proba = clf.predict_proba([feat])
    pred_value = clf.predict([feat])[0]
    print(proba)
    print(math.floor((proba[0][0] * 1000000)) / 10000)
    if (pred_value == 0):
        cv2.putText(frame, 'Happy: ' + str(math.floor((proba[0][0] * 1000000)) / 10000) + '%', (30, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA)
    else:
        cv2.putText(frame, 'Happy: ' + str(math.floor((proba[0][0] * 1000000)) / 10000) + '%', (30, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2, cv2.LINE_AA)
    if (pred_value == 2):
        cv2.putText(frame, 'ANGER: ' + str(math.floor((proba[0][1] * 1000000)) / 10000), (30, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA)
    else:
        cv2.putText(frame, 'ANGER: ' + str(math.floor((proba[0][1] * 1000000)) / 10000), (30, 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2, cv2.LINE_AA)
    if (pred_value == 3):
        cv2.putText(frame, 'DISGUST: ' + str(math.floor((proba[0][2] * 1000000)) / 10000), (30, 140),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA)
    else:
        cv2.putText(frame, 'DISGUST: ' + str(math.floor((proba[0][2] * 1000000)) / 10000), (30, 140),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2, cv2.LINE_AA)
    if (pred_value == 4):
        cv2.putText(frame, 'FEAR: ' + str(math.floor((proba[0][3] * 1000000)) / 10000), (30, 180),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA)
    else:
        cv2.putText(frame, 'FEAR: ' + str(math.floor((proba[0][3] * 1000000)) / 10000), (30, 180),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2, cv2.LINE_AA)
    if (pred_value == 5):
        cv2.putText(frame, 'SADNESS: ' + str(math.floor((proba[0][4] * 1000000)) / 10000), (30, 220),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA)
    else:
        cv2.putText(frame, 'SADNESS: ' + str(math.floor((proba[0][4] * 1000000)) / 10000), (30, 220),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2, cv2.LINE_AA)
    if (pred_value == 6):
        cv2.putText(frame, 'SURPRISE: ' + str(math.floor((proba[0][5] * 1000000)) / 10000), (30, 260),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA)
    else:
        cv2.putText(frame, 'SURPRISE: ' + str(math.floor((proba[0][5] * 1000000)) / 10000), (30, 260),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2, cv2.LINE_AA)
    if (pred_value == 7):
        cv2.putText(frame, 'NEUTRAL: ' + str(math.floor((proba[0][6] * 1000000)) / 10000), (30, 300),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2, cv2.LINE_AA)
    else:
        cv2.putText(frame, 'NEUTRAL: ' + str(math.floor((proba[0][6] * 1000000)) / 10000), (30, 300),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2, cv2.LINE_AA)
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1)
    if key == 27:
        break
cap.release()
cv2.destroyAllWindows()
Message=local variable 'xlist' referenced before assignment
Source=D:\study folder\msc project SVM\Emotion-Recognition-From-Facial-Expressions-master\live.py
StackTrace:
File "D:\study folder\msc project SVM\Emotion-Recognition-From-Facial-Expressions-master\live.py", line 74, in get_landmark_positions
return xlist, ylist, hog,sum
File "D:\study folder\msc project SVM\Emotion-Recognition-From-Facial-Expressions-master\live.py", line 78, in get_features
xlist, ylist, hog, sum = get_landmark_positions(clahe_image)
File "D:\study folder\msc project SVM\Emotion-Recognition-From-Facial-Expressions-master\live.py", line 109, in <module> (Current frame)
feat = get_features(crop_img)
The issue in the Python code is the local variable 'xlist' being referenced before assignment. This is facial emotion detection code using an SVM algorithm.
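The error means exactly what it says: in get_landmark_positions, xlist, ylist, hog, and sum are only assigned inside the for loop over detections, so when dlib finds no face in a frame, detections is empty, the loop body never runs, and the final return references names that were never bound. A self-contained illustration of the same scoping behaviour (toy functions, not the asker's code):

def broken(items):
    for item in items:
        result = item * 2
    return result  # UnboundLocalError when items is empty

def fixed(items):
    result = None  # bind a default before the loop
    for item in items:
        result = item * 2
    return result

print(fixed([]))   # None instead of a crash
print(broken([]))  # raises UnboundLocalError, just like get_landmark_positions

So guard the no-face case explicitly, e.g. return a sentinel (or raise) when len(detections) == 0 and skip such frames at the call site.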
I'm trying to run this Python script inside a Flask application, but the detections are not loading and only the camera feed is visible. I've tried face and hand detection with Flask, and both of those worked perfectly for me, but when I run this script it doesn't show the detections. The script below detects the object size in centimetres.
This is the Python script which I am trying to load inside the Flask app.
# Imports required by this snippet (the question omitted them):
import cv2
import numpy as np
import imutils
from imutils import perspective
from scipy.spatial import distance as dist

def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

cap = cv2.VideoCapture(0)
while cap.isOpened():  # the original `while (cap.read())` discarded one frame per iteration
    ref, frame = cap.read()
    frame = cv2.resize(frame, None, fx=1, fy=1, interpolation=cv2.INTER_AREA)
    orig = frame[:1080, 0:1920]
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (15, 15), 0)
    thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
    kernel = np.ones((3, 3), np.uint8)
    closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=3)
    result_img = closing.copy()
    contours, hierachy = cv2.findContours(result_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    hitung_objek = 0  # object count
    pixelsPerMetric = None
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area < 1000 or area > 120000:
            continue
        orig = frame.copy()
        box = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
        box = np.array(box, dtype="int")
        box = perspective.order_points(box)
        cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 64), 2)
        for (x, y) in box:
            cv2.circle(orig, (int(x), int(y)), 5, (0, 255, 64), -1)
        (tl, tr, br, bl) = box
        (tltrX, tltrY) = midpoint(tl, tr)
        (blbrX, blbrY) = midpoint(bl, br)
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)
        cv2.circle(orig, (int(tltrX), int(tltrY)), 0, (0, 255, 64), 5)
        cv2.circle(orig, (int(blbrX), int(blbrY)), 0, (0, 255, 64), 5)
        cv2.circle(orig, (int(tlblX), int(tlblY)), 0, (0, 255, 64), 5)
        cv2.circle(orig, (int(trbrX), int(trbrY)), 0, (0, 255, 64), 5)
        cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), (255, 0, 255), 2)
        cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 2)
        lebar_pixel = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))    # width in pixels
        panjang_pixel = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))  # length in pixels
        if pixelsPerMetric is None:
            pixelsPerMetric = lebar_pixel
            pixelsPerMetric = panjang_pixel
        lebar = lebar_pixel
        panjang = panjang_pixel
        cv2.putText(orig, "L: {:.1f}CM".format(lebar_pixel / 25.5), (int(trbrX + 10), int(trbrY)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        cv2.putText(orig, "B: {:.1f}CM".format(panjang_pixel / 25.5), (int(tltrX - 15), int(tltrY - 10)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        hitung_objek += 1
    cv2.putText(orig, "OBJECTS: {}".format(hitung_objek), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
    cv2.imshow('Camera', orig)
Output Image
PS: I have also checked the console, but there is no error.
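One thing worth noting for the Flask case: cv2.imshow opens a desktop window, which a Flask app cannot use; the annotated frames have to be JPEG-encoded and yielded from a streaming route instead. A minimal sketch of the usual MJPEG pattern (process_frame and the route name are placeholders, not from the original script) — if only the raw feed shows, the generator is probably yielding the unprocessed frame instead of the annotated orig:

from flask import Flask, Response
import cv2

app = Flask(__name__)
cap = cv2.VideoCapture(0)

def process_frame(frame):
    # Placeholder: run the contour/measurement code here and return the
    # annotated image (the `orig` frame from the script above).
    return frame

def gen_frames():
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        annotated = process_frame(frame)
        # Encode the *annotated* frame, not the raw one.
        ok, buf = cv2.imencode('.jpg', annotated)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + buf.tobytes() + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

if __name__ == '__main__':
    app.run(debug=True)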
Reference image of plots
I'm trying to run classification inference through a model and plot the results using matplotlib. I want to clear the first image data of the subplots when the Next or Previous button is pressed, but on clearing, the complete figure gets refreshed instead of just the subplots.
Code:
def display_image(Cnt):
    # filepath = os.path.join(folders, test_imgs[Cnt])
    img = cv2.imread(test_imgs[Cnt])
    # print(img)
    img_rsz = cv2.resize(img, (512, 512))
    act_img = img_rsz[:, :, ::-1]
    # Normalize data.
    img = img_rsz.astype('float32') / 255
    # If subtract pixel mean is enabled
    # img_mean = np.mean(img, axis=0)
    img -= x_train_mean
    img = np.expand_dims(img, axis=0)
    # img = np.expand_dims(x_test[2], axis=0)
    # Prediction by model
    prob_score = model.predict(img)
    # print(prob_score)
    index = np.argmax(prob_score[0])
    prob = prob_score[0][index]
    prob = 100 * prob
    # print(index)
    pad = np.full((90, 512, 3), [0, 0, 0], dtype=np.uint8)
    cv2.putText(pad, "Input Image", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    # text = "Original: {}".format(img.split('.')[0])
    # cv2.putText(pad, text, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    gt_img = np.vstack((act_img, pad))
    pad = np.full((90, 512, 3), [0, 0, 0], dtype=np.uint8)
    # cv2.putText(pad, "Predicted Image", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 3)
    text = "Predicted class: {}".format(folders[index])
    cls_score = "Probability Score: {:.2f}%".format(prob)
    cv2.putText(pad, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.putText(pad, cls_score, (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
    pd_img = np.vstack((act_img, pad))
    fig = plt.figure(dpi=380, linewidth=2, edgecolor="#04253a")
    fig.patch.set_facecolor('#cce7e8')
    fig.suptitle("Aggregate Classification", fontweight="bold", x=.5, y=.9)
    fig.add_subplot(1, 2, 1), plt.imshow(gt_img), plt.xticks([]), plt.yticks([])
    fig.add_subplot(1, 2, 2), plt.imshow(pd_img), plt.xticks([]), plt.yticks([])
    im = plt.imread('/content/drive/MyDrive/Logo.png')
    # newax = fig.add_axes([0.6, 0.6, 0.1, 0.1], anchor='NE', zorder=1)
    newax = fig.add_axes([0.80, 0.80, 0.1, 0.1], anchor='NE', zorder=1)
    newax.imshow(im)
    newax.axis('off')
    output.clear(output_tags='some_outputs')
    output.clear(output_tags='box_outputs')
    with output.use_tags('some_outputs'):
        plt.show()

box_layout = widgets.Layout(display='flex',
                            flex_flow='row',
                            justify_content='center',
                            align_items='center',
                            width='100%')
OutputBox = widgets.HBox([box1, box], layout=box_layout)
# OutputBox.layout.object_position = '200px 100px'
display_image(Count)
with output.use_tags('box_outputs'):
    display(OutputBox)
Is it possible to achieve this with draw() instead of imshow(), or with any other method? As in the reference image, only the subplot should be cleared and the next image should be drawn when Next is clicked.
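One pattern that avoids refreshing the whole figure is to create the figure and axes once, and on each button press clear and redraw only the image axes. A minimal, self-contained sketch in plain matplotlib (the Colab output/widget wiring from the original code is left out):

import numpy as np
import matplotlib.pyplot as plt

# Build the figure and its two image axes once, up front.
fig, (ax_left, ax_right) = plt.subplots(1, 2, dpi=100)
fig.suptitle("Aggregate Classification", fontweight="bold")

def show_pair(gt_img, pd_img):
    # Clear only the two image subplots, then draw the new pair.
    for ax, img in ((ax_left, gt_img), (ax_right, pd_img)):
        ax.clear()             # wipes the previous image from this subplot only
        ax.imshow(img)
        ax.set_xticks([])
        ax.set_yticks([])
    fig.canvas.draw_idle()     # redraw the existing figure in place

# Hypothetical usage with random stand-in images:
show_pair(np.random.rand(512, 512, 3), np.random.rand(512, 512, 3))
plt.show()

Hooking show_pair up to the Next/Previous buttons then repaints only the subplots, instead of rebuilding the figure with plt.figure on every call.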
I'm trying to build a real-time measurement algorithm. My problem is shown in the picture.
I need both diagonal measurements, but I have only found these two edges and one diagonal. When I try to find the other diagonal, I get an error, because the calculation always starts from the same corner (corner 0); whenever I try to start from another corner, I get an error.
I couldn't understand why I can't start with another corner.
Measurement code
import cv2
import utlis

###################################
webcam = True
path = '1.jpg'
cap = cv2.VideoCapture(0)
cap.set(10, 160)   # brightness
cap.set(3, 1920)   # frame width
cap.set(4, 1080)   # frame height
scale = 3
wP = 210 * scale   # A4 paper width (mm), scaled
hP = 297 * scale   # A4 paper height (mm), scaled
###################################

while True:
    if webcam:
        success, img = cap.read()
    else:
        img = cv2.imread(path)
    imgContours, conts = utlis.getContours(img, minArea=50000, filter=4)
    if len(conts) != 0:
        biggest = conts[0][2]
        print(biggest)
        imgWarp = utlis.warpImg(img, biggest, wP, hP)
        imgContours2, conts2 = utlis.getContours(imgWarp,
                                                 minArea=2000, filter=4,
                                                 cThr=[50, 50], draw=False)
        if len(conts) != 0:
            for obj in conts2:
                cv2.polylines(imgContours2, [obj[2]], True, (0, 255, 0), 2)
                nPoints = utlis.reorder(obj[2])
                nW = round((utlis.findDis(nPoints[0][0] // scale, nPoints[1][0] // scale) / 10), 1)
                nH = round((utlis.findDis(nPoints[0][0] // scale, nPoints[2][0] // scale) / 10), 1)
                nQ = round((utlis.findDis(nPoints[0][0] // scale, nPoints[3][0] // scale) / 10), 1)
                nW2 = round((utlis.findDis(nPoints[2][2] // scale, nPoints[1][2] // scale) / 10), 1)
                # nH2 = round((utlis.findDis(nPoints[3][3] // scale, nPoints[2][0] // scale) / 10), 1)
                # nZ = round((utlis.findDis(nPoints[3][3] // scale, nPoints[3][0] // scale) / 10), 1)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[1][0][0], nPoints[1][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[2][0][0], nPoints[2][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[3][0][0], nPoints[3][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                # cv2.arrowedLine(imgContours2, (nPoints[3][3][3], nPoints[3][3][1]),
                #                 (nPoints[1][0][0], nPoints[1][0][1]),
                #                 (255, 0, 255), 3, 8, 0, 0.05)
                x, y, w, h = obj[3]
                cv2.putText(imgContours2, '{}cm'.format(nW), (x + 30, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5,
                            (255, 0, 255), 2)
                cv2.putText(imgContours2, '{}cm'.format(nH), (x - 70, y + h // 2), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5,
                            (255, 0, 255), 2)
                cv2.putText(imgContours2, '{}cm'.format(nQ), (x + 200, y + 200), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5,
                            (255, 0, 255), 2)
        cv2.imshow('A4', imgContours2)
    img = cv2.resize(img, (0, 0), None, 0.5, 0.5)
    cv2.imshow('Original', img)
    cv2.waitKey(1)
utlis.py
import cv2
import numpy as np

def getContours(img, cThr=[100, 100], showCanny=False, minArea=1000, filter=0, draw=False):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, cThr[0], cThr[1])
    kernel = np.ones((5, 5))
    imgDial = cv2.dilate(imgCanny, kernel, iterations=3)
    imgThre = cv2.erode(imgDial, kernel, iterations=2)
    if showCanny:
        cv2.imshow('Canny', imgThre)
    contours, hiearchy = cv2.findContours(imgThre, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    finalCountours = []
    for i in contours:
        area = cv2.contourArea(i)
        if area > minArea:
            peri = cv2.arcLength(i, True)
            approx = cv2.approxPolyDP(i, 0.02 * peri, True)
            bbox = cv2.boundingRect(approx)
            if filter > 0:
                if len(approx) == filter:
                    finalCountours.append([len(approx), area, approx, bbox, i])
            else:
                finalCountours.append([len(approx), area, approx, bbox, i])
    finalCountours = sorted(finalCountours, key=lambda x: x[1], reverse=True)
    if draw:
        for con in finalCountours:
            cv2.drawContours(img, con[4], -1, (0, 0, 255), 3)
    return img, finalCountours
def reorder(myPoints):
    # print(myPoints.shape)
    myPointsNew = np.zeros_like(myPoints)
    myPoints = myPoints.reshape((4, 2))
    add = myPoints.sum(1)
    myPointsNew[0] = myPoints[np.argmin(add)]   # top-left: smallest x + y
    myPointsNew[3] = myPoints[np.argmax(add)]   # bottom-right: largest x + y
    diff = np.diff(myPoints, axis=1)
    myPointsNew[1] = myPoints[np.argmin(diff)]  # top-right: smallest y - x
    myPointsNew[2] = myPoints[np.argmax(diff)]  # bottom-left: largest y - x
    return myPointsNew
def warpImg(img, points, w, h, pad=20):
    # print(points)
    points = reorder(points)
    pts1 = np.float32(points)
    pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgWarp = cv2.warpPerspective(img, matrix, (w, h))
    imgWarp = imgWarp[pad:imgWarp.shape[0] - pad, pad:imgWarp.shape[1] - pad]
    return imgWarp

def findDis(pts1, pts2):
    return ((pts2[0] - pts1[0]) ** 2 + (pts2[1] - pts1[1]) ** 2) ** 0.5
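Given the corner order that utlis.reorder produces (index 0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right), nQ above is already the top-left to bottom-right diagonal; the other diagonal runs between nPoints[1] and nPoints[2]. Each nPoints[i] has shape (1, 2), so indexing like nPoints[3][3] or nPoints[2][2] goes past the end of the array, which would explain the error when starting from another corner. A sketch of the missing measurement, meant to sit inside the `for obj in conts2` loop and using the same variable names:

# Second diagonal: top-right (index 1) to bottom-left (index 2).
# The point itself is nPoints[i][0]; higher indices raise IndexError.
nD2 = round((utlis.findDis(nPoints[1][0] // scale, nPoints[2][0] // scale) / 10), 1)
cv2.arrowedLine(imgContours2, (nPoints[1][0][0], nPoints[1][0][1]),
                (nPoints[2][0][0], nPoints[2][0][1]),
                (255, 0, 255), 3, 8, 0, 0.05)
cv2.putText(imgContours2, '{}cm'.format(nD2), (x + 100, y + 100),
            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)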