Reference image of plots
I'm trying to run inference with a classification model and plot the results using matplotlib. I want to clear the first image shown in the subplots when the Next or Previous button is pressed, but instead of only the subplots being cleared, the complete figure gets refreshed.
Code:
def display_image(Cnt):
    # filepath = os.path.join(folders, test_imgs[Cnt])
    img = cv2.imread(test_imgs[Cnt])
    # print(img)
    img_rsz = cv2.resize(img, (512, 512))
    act_img = img_rsz[:, :, ::-1]
    # Normalize data.
    img = img_rsz.astype('float32') / 255
    # If subtract pixel mean is enabled
    # img_mean = np.mean(img, axis=0)
    img -= x_train_mean
    img = np.expand_dims(img, axis=0)
    # img = np.expand_dims(x_test[2], axis=0)
    # Prediction by model
    prob_score = model.predict(img)
    # print(prob_score)
    index = np.argmax(prob_score[0])
    prob = prob_score[0][index]
    prob = 100 * prob
    # print(index)
    pad = np.full((90, 512, 3), [0, 0, 0], dtype=np.uint8)
    cv2.putText(pad, "Input Image", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    # text = "Original: {}".format(img.split('.')[0])
    # cv2.putText(pad, text, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    gt_img = np.vstack((act_img, pad))
    pad = np.full((90, 512, 3), [0, 0, 0], dtype=np.uint8)
    # cv2.putText(pad, "Predicted Image", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 3)
    text = "Predicted class: {}".format(folders[index])
    cls_score = "Probability Score: {:.2f}%".format(prob)
    cv2.putText(pad, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.putText(pad, cls_score, (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)
    pd_img = np.vstack((act_img, pad))
    fig = plt.figure(dpi=380, linewidth=2, edgecolor="#04253a")
    fig.patch.set_facecolor('#cce7e8')
    fig.suptitle("Aggregate Classification", fontweight="bold", x=.5, y=.9)
    fig.add_subplot(1, 2, 1), plt.imshow(gt_img), plt.xticks([]), plt.yticks([])
    fig.add_subplot(1, 2, 2), plt.imshow(pd_img), plt.xticks([]), plt.yticks([])
    im = plt.imread('/content/drive/MyDrive/Logo.png')
    # newax = fig.add_axes([0.6, 0.6, 0.1, 0.1], anchor='NE', zorder=1)
    newax = fig.add_axes([0.80, 0.80, 0.1, 0.1], anchor='NE', zorder=1)
    newax.imshow(im)
    newax.axis('off')
    output.clear(output_tags='some_outputs')
    output.clear(output_tags='box_outputs')
    with output.use_tags('some_outputs'):
        plt.show()

box_layout = widgets.Layout(display='flex',
                            flex_flow='row',
                            justify_content='center',
                            align_items='center',
                            width='100%')
OutputBox = widgets.HBox([box1, box], layout=box_layout)
# OutputBox.layout.object_position = '200px 100px'
display_image(Count)
with output.use_tags('box_outputs'):
    display(OutputBox)
Is it possible to achieve this with draw() instead of imshow(), or with any other method? As in the reference image, only the subplot should be cleared and the next image drawn when Next is clicked.
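One possible approach (a sketch of my own, not from the original code) is to create the figure and the two axes once, keep references to the image artists, and on each button press only swap the image data with set_data() and redraw the canvas, instead of building a new figure every call. The names first_gt_img / first_pd_img below are hypothetical placeholders:

import matplotlib.pyplot as plt

# Build the figure and both axes once, outside the button callbacks.
fig, (ax_gt, ax_pd) = plt.subplots(1, 2)
gt_artist = ax_gt.imshow(first_gt_img)   # first_gt_img / first_pd_img: hypothetical initial images
pd_artist = ax_pd.imshow(first_pd_img)
for ax in (ax_gt, ax_pd):
    ax.set_xticks([])
    ax.set_yticks([])

def update_subplots(gt_img, pd_img):
    # Replace only the image data in the existing axes; the rest of the figure
    # (suptitle, logo axes, styling) stays as it is.
    gt_artist.set_data(gt_img)
    pd_artist.set_data(pd_img)
    fig.canvas.draw_idle()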
As I learned from tutorial videos, I tried this code:
import numpy as np
import cv2

ARUCO_DICT = {
    "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
    "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
}
# In fact there are more dictionary keys than written above. I deleted them to shorten the question.

def aruco_display(corners, ids, rejected, image):
    if len(corners) > 0:
        ids = ids.flatten()
        for (markerCorner, markerID) in zip(corners, ids):
            corners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = corners
            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))
            cv2.line(image, topLeft, topRight, (0, 255, 0), 2)
            cv2.line(image, topRight, bottomRight, (0, 255, 0), 2)
            cv2.line(image, bottomRight, bottomLeft, (0, 255, 0), 2)
            cv2.line(image, bottomLeft, topLeft, (0, 255, 0), 2)
            cX = int((topLeft[0] + bottomRight[0]) / 2.0)
            cY = int((topLeft[1] + bottomRight[1]) / 2.0)
            cv2.circle(image, (cX, cY), 4, (0, 0, 255), -1)
            cv2.putText(image, str(markerID), (topLeft[0], topLeft[1] - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            print("[Inference] ArUco marker ID: {}".format(markerID))
    return image

img = cv2.imread('markers.jpg', 1)
# the first parameter will change according to the name of the photo

aruco_type = ["DICT_4X4_50",
              "DICT_4X4_100",
              "DICT_4X4_250",
              ]

for i in aruco_type:
    arucoDict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[i])
    arucoParams = cv2.aruco.DetectorParameters()
    corners, ids, rejected = cv2.aruco.ArucoDetector(arucoDict, arucoParams).detectMarkers(img)
    detected_markers = aruco_display(corners, ids, rejected, img)
    cv2.imshow("Image", detected_markers)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
This code can detect most of the markers, but there is still a problem: it fails to detect some ArUco markers, like these:
ArUco1
ArUco2
How can I solve this issue?
Since it detects some of the markers, I don't understand why it cannot detect the other ArUco markers in the same image.
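One thing that sometimes helps (my own suggestion, not part of the original question) is upscaling the image and loosening a few detector parameters before calling detectMarkers, since small or low-contrast markers are a common reason for missed detections. This sketch reuses img and arucoDict from the code above; the exact values are just guesses to experiment with:

arucoParams = cv2.aruco.DetectorParameters()
# Hypothetical tuning: accept smaller markers and refine corner locations.
arucoParams.minMarkerPerimeterRate = 0.01
arucoParams.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_SUBPIX

# Upscale the input so that small markers cover more pixels.
big = cv2.resize(img, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
detector = cv2.aruco.ArucoDetector(arucoDict, arucoParams)
corners, ids, rejected = detector.detectMarkers(big)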
I am trying to build a real-time measurement algorithm. My problem is shown in the attached picture.
I need the diagonal measurements, but so far I have only found the two edges and one diagonal. The calculation always starts from the same corner (corner 0); when I try to compute the other diagonal starting from another corner, I get an error.
I couldn't understand why I can't start from another corner.
Measurement code
import cv2
import utlis

###################################
webcam = True
path = '1.jpg'
cap = cv2.VideoCapture(0)
cap.set(10, 160)
cap.set(3, 1920)
cap.set(4, 1080)
scale = 3
wP = 210 * scale
hP = 297 * scale
###################################

while True:
    if webcam:
        success, img = cap.read()
    else:
        img = cv2.imread(path)

    imgContours, conts = utlis.getContours(img, minArea=50000, filter=4)

    if len(conts) != 0:
        biggest = conts[0][2]
        print(biggest)
        imgWarp = utlis.warpImg(img, biggest, wP, hP)
        imgContours2, conts2 = utlis.getContours(imgWarp,
                                                 minArea=2000, filter=4,
                                                 cThr=[50, 50], draw=False)
        if len(conts) != 0:
            for obj in conts2:
                cv2.polylines(imgContours2, [obj[2]], True, (0, 255, 0), 2)
                nPoints = utlis.reorder(obj[2])
                nW = round((utlis.findDis(nPoints[0][0] // scale, nPoints[1][0] // scale) / 10), 1)
                nH = round((utlis.findDis(nPoints[0][0] // scale, nPoints[2][0] // scale) / 10), 1)
                nQ = round((utlis.findDis(nPoints[0][0] // scale, nPoints[3][0] // scale) / 10), 1)
                nW2 = round((utlis.findDis(nPoints[2][2] // scale, nPoints[1][2] // scale) / 10), 1)
                # nH2 = round((utlis.findDis(nPoints[3][3] // scale, nPoints[2][0] // scale) / 10), 1)
                # nZ = round((utlis.findDis(nPoints[3][3] // scale, nPoints[3][0] // scale) / 10), 1)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[1][0][0], nPoints[1][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[2][0][0], nPoints[2][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                cv2.arrowedLine(imgContours2, (nPoints[0][0][0], nPoints[0][0][1]),
                                (nPoints[3][0][0], nPoints[3][0][1]),
                                (255, 0, 255), 3, 8, 0, 0.05)
                # cv2.arrowedLine(imgContours2, (nPoints[3][3][3], nPoints[3][3][1]),
                #                 (nPoints[1][0][0], nPoints[1][0][1]),
                #                 (255, 0, 255), 3, 8, 0, 0.05)
                x, y, w, h = obj[3]
                cv2.putText(imgContours2, '{}cm'.format(nW), (x + 30, y - 10),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
                cv2.putText(imgContours2, '{}cm'.format(nH), (x - 70, y + h // 2),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
                cv2.putText(imgContours2, '{}cm'.format(nQ), (x + 200, y + 200),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (255, 0, 255), 2)
        cv2.imshow('A4', imgContours2)

    img = cv2.resize(img, (0, 0), None, 0.5, 0.5)
    cv2.imshow('Original', img)
    cv2.waitKey(1)
utlis
import cv2
import numpy as np

def getContours(img, cThr=[100, 100], showCanny=False, minArea=1000, filter=0, draw=False):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, cThr[0], cThr[1])
    kernel = np.ones((5, 5))
    imgDial = cv2.dilate(imgCanny, kernel, iterations=3)
    imgThre = cv2.erode(imgDial, kernel, iterations=2)
    if showCanny:
        cv2.imshow('Canny', imgThre)
    contours, hiearchy = cv2.findContours(imgThre, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    finalCountours = []
    for i in contours:
        area = cv2.contourArea(i)
        if area > minArea:
            peri = cv2.arcLength(i, True)
            approx = cv2.approxPolyDP(i, 0.02 * peri, True)
            bbox = cv2.boundingRect(approx)
            if filter > 0:
                if len(approx) == filter:
                    finalCountours.append([len(approx), area, approx, bbox, i])
            else:
                finalCountours.append([len(approx), area, approx, bbox, i])
    finalCountours = sorted(finalCountours, key=lambda x: x[1], reverse=True)
    if draw:
        for con in finalCountours:
            cv2.drawContours(img, con[4], -1, (0, 0, 255), 3)
    return img, finalCountours

def reorder(myPoints):
    # print(myPoints.shape)
    myPointsNew = np.zeros_like(myPoints)
    myPoints = myPoints.reshape((4, 2))
    add = myPoints.sum(1)
    myPointsNew[0] = myPoints[np.argmin(add)]
    myPointsNew[3] = myPoints[np.argmax(add)]
    diff = np.diff(myPoints, axis=1)
    myPointsNew[1] = myPoints[np.argmin(diff)]
    myPointsNew[2] = myPoints[np.argmax(diff)]
    return myPointsNew

def warpImg(img, points, w, h, pad=20):
    # print(points)
    points = reorder(points)
    pts1 = np.float32(points)
    pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgWarp = cv2.warpPerspective(img, matrix, (w, h))
    imgWarp = imgWarp[pad:imgWarp.shape[0] - pad, pad:imgWarp.shape[1] - pad]
    return imgWarp

def findDis(pts1, pts2):
    return ((pts2[0] - pts1[0]) ** 2 + (pts2[1] - pts1[1]) ** 2) ** 0.5
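A side note on the error described above (my own observation, not from the original post): after reorder(), nPoints has shape (4, 1, 2), so each corner is nPoints[i][0]; indexing like nPoints[3][3] or nPoints[2][2] goes out of bounds and raises an IndexError. Under that assumption, a sketch of how the second diagonal (top-right to bottom-left corner) could be measured with the same helpers:

# nPoints comes from utlis.reorder(obj[2]) in the measurement loop above.
# Corner order after reorder(): 0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right.
nD2 = round(utlis.findDis(nPoints[1][0] // scale, nPoints[2][0] // scale) / 10, 1)
cv2.arrowedLine(imgContours2,
                (nPoints[1][0][0], nPoints[1][0][1]),
                (nPoints[2][0][0], nPoints[2][0][1]),
                (255, 0, 255), 3, 8, 0, 0.05)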
layout = Image.open(r"./meme_templates/heaven.jpg")
asset = member.avatar_url_as(size=128)
data = BytesIO(await asset.read())
pfp = Image.open(data)
bigsize = (pfp.size[0] * 3, pfp.size[1] * 3)
mask = Image.new('L', bigsize, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse((0, 0) + bigsize, fill=255)
mask = mask.resize(pfp.size, Image.ANTIALIAS)
mask = ImageChops.darker(mask, pfp.split()[-1])
pfp.putalpha(mask)
pfp = pfp.resize((93, 93))
layout.paste(pfp, (162, 92))
draw = ImageDraw.Draw(layout)
font1 = ImageFont.truetype("./OpenSans-Bold.ttf", 32)
font2 = ImageFont.truetype("./BullettoKilla.ttf", 24)
font3 = ImageFont.truetype("./BullettoKilla.ttf", 46)
imgdesc = f"{member}"
w, h = draw.textsize(imgdesc)
imgmssg = f"Welcome to DIVINE! Hope you have a blessed stay!"
draw.text((275, 112), imgdesc, (0, 0, 0), font1)
draw.text((45, 217), imgmssg, (0, 0, 0), font2)
draw.text((194, 14), "WELCOME!", (0, 0, 0), font3)
layout.save(r"./meme_templates/heavenedit.jpg")
await channel.send(file=discord.File(r"./meme_templates/heavenedit.jpg"))
This is my code, and my output looks like this:
Output image: https://i.stack.imgur.com/j8LzM.png
I want the profile picture in my output to be a perfect circular cropped image.
Any kind of help would be appreciated. Thank you.
I haven't used PIL much, but the issue might be that when pasting the image you haven't passed the alpha mask:
layout.paste(pfp, (162, 92), pfp)
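For context, a minimal standalone sketch of that idea (file names and coordinates here are placeholders, not from the original post):

from PIL import Image, ImageDraw

background = Image.open("template.jpg")                          # placeholder paths
pfp = Image.open("avatar.png").convert("RGBA").resize((93, 93))

# Circular alpha mask the same size as the avatar.
mask = Image.new("L", pfp.size, 0)
ImageDraw.Draw(mask).ellipse((0, 0) + pfp.size, fill=255)

# Passing the mask as the third argument keeps everything outside the circle transparent.
background.paste(pfp, (162, 92), mask)
background.save("result.jpg")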
The following code scans an image from bottom to top. However, the Kalman filter prediction always shows (0, 0) the first time, so it draws a line from the bottom to (0, 0). How can I make the predicted path (Kalman filter) more similar to the actual path?
The following code and image have been updated.
import cv2
import matplotlib.pyplot as plt
import numpy as np

img = cv2.imread('IMG_4614.jpg', 1)
img = cv2.resize(img, (600, 800))

hsv_image = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
low_yellow = np.array([18, 94, 140])
up_yellow = np.array([48, 255, 255])
hsv_mask = cv2.inRange(hsv_image, low_yellow, up_yellow)

hls_image = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
low_yellow = np.array([0, 170, 24])
up_yellow = np.array([54, 255, 255])
hls_mask = cv2.inRange(hls_image, low_yellow, up_yellow)

mask = np.logical_or(hsv_mask, hls_mask)

offset = 100
height, width, _ = img.shape
previousPos = height
currentPos = height - offset
finalImg = img.copy()
is_first = True

initState = np.array([[np.float32(int(width / 2))], [np.float32(height)]], np.float32)
last_measurement = current_measurement = initState
last_prediction = current_prediction = np.array((2, 1), np.float32)

kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0], [0, 1, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]], np.float32)

while currentPos >= 0:
    histogram = np.sum(mask[currentPos:previousPos, :], axis=0)
    areas = np.where(histogram > 40)
    if areas[0].size >= 2:
        bottomLeft = areas[0][0]
        topRight = areas[0][-1]
        x = int((topRight - bottomLeft) / 2 + bottomLeft)
        y = int((previousPos - currentPos) / 2 + currentPos)

        last_prediction = current_prediction
        last_measurement = current_measurement
        current_measurement = np.array([[np.float32(x)], [np.float32(y)]], np.float32)
        lmx, lmy = last_measurement[0], last_measurement[1]
        cmx, cmy = current_measurement[0], current_measurement[1]

        cv2.rectangle(finalImg, (bottomLeft, previousPos), (topRight, currentPos), (0, 255, 0), 5)
        cv2.circle(finalImg, (x, y), 5, (0, 0, 255), -1)
        cv2.line(finalImg, (lmx, lmy), (cmx, cmy), (255, 0, 0), 5)  # actual path

        kalman.correct(current_measurement - initState)
        current_prediction = kalman.predict()

        lpx, lpy = last_prediction[0] + initState[0], last_prediction[1] + initState[1]
        cpx, cpy = current_prediction[0] + initState[0], current_prediction[1] + initState[1]
        cv2.line(finalImg, (lpx, lpy), (cpx, cpy), (255, 0, 255), 5)  # predicted path

        plt.figure(figsize=(10, 10))
        plt.imshow(cv2.cvtColor(finalImg, cv2.COLOR_BGR2RGB))
        plt.show()

    previousPos = currentPos
    currentPos = currentPos - offset
This has already been answered here:
Kalman filter always predicting origin
The OpenCV Kalman filter implementation does not let you set an initial state.
You have to save your initial state, subtract it when you call kalman.correct, and add it back when you call kalman.predict.
Something like this pseudo-code:
initState = (y, x)
....
kalman.correct(current_measurement - initState)
...
prediction = kalman.predict()
prediction[0] = prediction[0] + initState[0]
prediction[1] = prediction[1] + initState[1]
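A minimal runnable sketch of that offset idea, assuming the same constant-velocity 4-state / 2-measurement filter as in the question (the measurement values here are made up):

import numpy as np
import cv2

kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0],
                                    [0, 1, 0, 1],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)

initState = np.array([[300.], [800.]], np.float32)   # first known (x, y), made up

for x, y in [(300, 800), (305, 700), (311, 600)]:    # made-up measurements
    measurement = np.array([[np.float32(x)], [np.float32(y)]])
    kalman.correct(measurement - initState)          # work relative to the initial state
    prediction = kalman.predict()
    px = float(prediction[0]) + float(initState[0])  # shift back into image coordinates
    py = float(prediction[1]) + float(initState[1])
    print(px, py)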
I managed to change the initial state by changing statePost and statePre.
In init:
self.KF = cv2.KalmanFilter(nmbStateVars, nmbMeasts, nmbControlInputs)
A = self.KF.statePost
A[0:4] = self.measurement.reshape((4, 1))
# A[4:8] = 0.0
self.KF.statePost = A
self.KF.statePre = A
Then update as usual
self.updatedMeasts = self.KF.correct(self.measurement)
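Outside of that class, the same statePost/statePre idea in a minimal standalone form (a sketch with made-up values, again for a 4-state / 2-measurement filter):

import numpy as np
import cv2

kalman = cv2.KalmanFilter(4, 2)
kalman.measurementMatrix = np.array([[1, 0, 0, 0],
                                     [0, 1, 0, 0]], np.float32)
kalman.transitionMatrix = np.array([[1, 0, 1, 0],
                                    [0, 1, 0, 1],
                                    [0, 0, 1, 0],
                                    [0, 0, 0, 1]], np.float32)

# Seed the state with the first measurement (x, y) and zero velocity, so the
# first prediction starts near the measurement instead of at (0, 0).
first_xy = np.array([[300.], [800.]], np.float32)     # made-up first position
init = np.vstack([first_xy, np.zeros((2, 1), np.float32)])
kalman.statePost = init.copy()
kalman.statePre = init.copy()

kalman.correct(first_xy)    # then update as usual
print(kalman.predict().ravel())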
I've resized my image to newX, newY. Prior to resizing I had a point (x,y). Now that I've resized my image I'd like to know where the point is on the new image. Sounds simple but I'm bad at math. Any ideas?
It is just a matter of ratios:
On the x-axis, you have resized by a ratio Rx = newX/oldX, and by a ratio Ry = newY/oldY on the y-axis.
Therefore, your new coordinates for point (x,y) are (Rx * x, Ry * y).
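As a quick illustration (my own snippet with made-up numbers):

old_w, old_h = 1920, 1080      # original image size (made-up numbers)
new_w, new_h = 600, 800        # size after resizing
x, y = 960, 540                # point in the original image

Rx = new_w / old_w
Ry = new_h / old_h
new_x, new_y = int(x * Rx), int(y * Ry)
print(new_x, new_y)            # -> 300 400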
from heatmappy import Heatmapper
from PIL import Image
import cv2
import numpy as np
from PIL import ImageFont
from PIL import ImageDraw
def generate_heatmap_data_list(coord_list):
    cumulative_data_list = []
    for lsingledata in coord_list:
        temp = []
        for x in range(lsingledata[0][0], lsingledata[1][0]):
            for y in range(lsingledata[0][1], lsingledata[1][1]):
                temp.append([x, y])
        data = [temp[i] for i in range(len(temp)) if (i % 250) < lsingledata[2]]
        cumulative_data_list += data
    return cumulative_data_list
coord = [[[774,265],[909,409],1],[[985,809],[1139,992],5],[[514,842],[803,1024],10],[[127,629],[283, 869],20],[[258,442],[429, 584],30],
[[827,851],[980,1033],40],[[343,611],[514,753],1],[[500,358],[595,409],50],[[163,879],[334,999],15]]
data = generate_heatmap_data_list(coord)
example_img_path = r"C:\Workarea\heatmap\code_testing_pyheatmap\blue_print.jpg"
example_img = Image.open(example_img_path)
print("###", type(example_img))
width, height = example_img.size
original_dim = (width,height)
##resize_dim to plot the heatmap size
resize_dim = (1237,1036)
example_img = example_img.resize(resize_dim)
new_point_list = []
for lsingle_point in data:
    x1 = int(lsingle_point[0] * (resize_dim[0] / original_dim[0]))
    y1 = int(lsingle_point[1] * (resize_dim[1] / original_dim[1]))
    new_point_list.append([x1, y1])
heatmapper = Heatmapper()
heatmap = heatmapper.heatmap_on_img(new_point_list, example_img)
print("PIL_type: ", type(heatmap))
heatmap.save('temp.png')
######if you want to plot percentage on image
img = cv2.imread('temp.png')
print("cv2_type:", type(img))
img = cv2.putText(img, '1%', (803,341), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##FRANGRANCE
img = cv2.putText(img, '5%', (1027,919), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##COSMETICS
img = cv2.putText(img, '10%', (661,977), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##HONEY
img = cv2.putText(img, '20%', (209,765), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##AJILE
img = cv2.putText(img, '30%', (337,539), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##ANNABELLE
img = cv2.putText(img, '40%', (909,953), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##SUNGLASSES
img = cv2.putText(img, '1%', (423,707), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##VANHEUSEN
img = cv2.putText(img, '50%', (539,405), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##JALLUS
img = cv2.putText(img, '15%', (231,961), cv2.FONT_HERSHEY_SIMPLEX,1, (0, 0, 0), 2, cv2.LINE_AA)##DENIM
cv2.imwrite("put_text_03_01_2022_heatmap.jpg", img)