I am trying to recognize the number on a 7-segment display.
I am using Python in a Jupyter notebook.
I have an image of each digit 0~9 as shown on the 7-segment display,
and each digit with a decimal point is saved separately.
Below are the sample images of 3, 3., 2, and 2.,
and I want to find these images in the target image.
I heard OpenCV has tools for finding similar images.
I tried Brute-Force Matching with SIFT descriptors and a ratio test,
but the output does not seem accurate.
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt

img1 = cv.imread('C:\\Users\\USER\\Desktop\\test\\deeplearningimage\\thermo\\3..png', cv.IMREAD_GRAYSCALE)  # queryImage (digit template)
img2 = cv.imread('C:\\Users\\USER\\Desktop\\test\\thermosample.jpg', cv.IMREAD_GRAYSCALE)  # trainImage (target)

# Initiate SIFT detector
sift = cv.SIFT_create()

# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# BFMatcher with default params
bf = cv.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)

# Apply ratio test
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append([m])

# cv.drawMatchesKnn expects a list of lists as matches
img3 = cv.drawMatchesKnn(img1, kp1, img2, kp2, good, None, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(img3), plt.show()
Here is the output of the code above.
I'm not sure how to proceed from here.
Is there another OpenCV technique that would work for this problem?
You can use template matching after thresholding and edge detection.
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Read Image
BGR = cv2.imread('input.jpg')
RGB = cv2.cvtColor(BGR, cv2.COLOR_BGR2RGB)
# Channels split
R = BGR[...,2]
G = BGR[...,1]
B = BGR[...,0]
# Threshold per channel
R[B>120] = 0
R[G>120] = 0
R[R<230] = 0
# Binarize
Binary = cv2.threshold(R, 127, 255, cv2.THRESH_BINARY)[1]
# Edge Detection
Edges = cv2.Canny(Binary, 50, 200)
# Read Template
templBGR = cv2.imread('templ.png')
templRGB = cv2.cvtColor(templBGR, cv2.COLOR_BGR2RGB)
templateGray = cv2.cvtColor(templBGR, cv2.COLOR_BGR2GRAY)
# Binarize Template
templateBinary = cv2.threshold(templateGray, 84, 255, cv2.THRESH_BINARY)[1]
# Denoise Template
templateFiltered = cv2.medianBlur(templateBinary,7)
# Resize Template
template = cv2.resize(templateFiltered, (templBGR.shape[1]//2, templBGR.shape[0]//2))
# Edge Detection Template
templateEdges = cv2.Canny(template, 50, 200)
# Extract Dimensions
h, w = template.shape
res = cv2.matchTemplate(Edges,templateEdges,cv2.TM_CCORR)
(_, _, _, maxLoc) = cv2.minMaxLoc(res)
img = RGB.copy()
cv2.rectangle(img, (maxLoc[0], maxLoc[1]), (maxLoc[0] + w, maxLoc[1] + h), (255,255,128), 2)
plt.subplot(221)
plt.imshow(RGB)
plt.title('Original')
plt.axis('off')
plt.subplot(222)
plt.imshow(Edges, cmap='gray')
plt.title('Segmented')
plt.axis('off')
plt.subplot(223)
plt.imshow(templRGB)
plt.title('Template')
plt.axis('off')
plt.subplot(224)
plt.imshow(img)
plt.title('Result')
plt.axis('off')
plt.show()
If you want to do multi-matching, it is better to use a loop:
threshold = 0.8
Loc = np.where(res >= threshold)
for pt in zip(*Loc[::-1]):  # np.where returns (rows, cols); reverse to get (x, y)
    cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (255, 255, 128), 2)
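Note that the fixed 0.8 threshold only makes sense with a normalized matching method; cv2.TM_CCORR produces unbounded scores. A minimal sketch of the same multi-match loop with cv2.TM_CCOEFF_NORMED (reusing Edges, templateEdges, w, h, and img from above):
# Normalized correlation: scores lie in [-1, 1], so a fixed threshold is meaningful
res_norm = cv2.matchTemplate(Edges, templateEdges, cv2.TM_CCOEFF_NORMED)
threshold = 0.8
loc = np.where(res_norm >= threshold)
for pt in zip(*loc[::-1]):  # (x, y) of every location scoring above the threshold
    cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (255, 255, 128), 2)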
Related
I used K-Means Clustering to perform segmentation on this traffic sign as shown below.
This is my code:
Read image and blur
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt

img = cv.imread('000_0001.png')
img_rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
kernel_size = 5
img_rgb = cv.blur(img_rgb, (kernel_size, kernel_size))
# reshape
img_reshape = img_rgb.reshape((-1, 3))
img_reshape = np.float32(img_reshape)
Perform k-means clustering
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 4
attempts = 10
ret, label, center = cv.kmeans(img_reshape, K, None, criteria, attempts, cv.KMEANS_PP_CENTERS)
# reshape into original dimensions
center = np.uint8(center)
res = center[label.flatten()]
result = res.reshape(img_rgb.shape)
plt.figure(figsize = (15, 15))
plt.subplot(1, 2, 1)
plt.imshow(img_rgb)
plt.subplot(1, 2, 2)
plt.imshow(result)
plt.show()
Create masks
from numpy import linalg as LN
red = (255, 0, 0)
Idx_min_red = np.argmin(LN.norm(center-red, axis = 1))
white = (255, 255, 255)
Idx_min_white = np.argmin(LN.norm(center-white, axis = 1))
black = (0, 0, 0)
Idx_min_black = np.argmin(LN.norm(center-black, axis = 1))
mask_red = result == center[Idx_min_red]
mask_white = result == center[Idx_min_white]
mask_black = result == center[Idx_min_black]
pre_mask = cv.bitwise_or(np.float32(mask_red), np.float32(mask_white))
mask = cv.bitwise_or(np.float32(pre_mask), np.float32(mask_black))
Segment the image
seg_img = img*(mask.astype("uint8"))
Morphological Transformation
kernel = np.ones((5, 5), dtype = np.uint8)
img_dilate = cv.morphologyEx(seg_img, cv.MORPH_OPEN, kernel)
res = np.hstack((img, img_dilate))
cv.imshow("res", res)
cv.waitKey(0)
cv.destroyAllWindows()
Question here
This code can only segment red traffic signs. Is it possible to tweak it a little for different colours so that it can segment red, blue, and yellow traffic signs (like the one below, for example)?
Update: this is what I have tried.
I used a pipeline to OR all the masks together:
mask = mask_red | mask_white | mask_black | mask_blue
but the new mask then fails to segment the image.
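The | combination fails partly because comparisons like result == center[Idx_min_red] yield three-channel boolean arrays. A minimal sketch of a label-based alternative, assuming the center, label, and img_rgb arrays from the k-means step above (the build_mask helper and the colour list are illustrative, not from the original code):
import numpy as np
import cv2 as cv

# Illustrative helper: single-channel mask of every cluster whose centre is
# closest to one of the target colours (RGB tuples)
def build_mask(center, label, shape, targets):
    keep = [int(np.argmin(np.linalg.norm(center.astype(np.float32) - np.float32(t), axis=1)))
            for t in targets]
    flat = np.isin(label.flatten(), keep).astype(np.uint8) * 255
    return flat.reshape(shape[:2])

# Pick only the colours you want to keep, e.g. red, blue, yellow, white, black
targets = [(255, 0, 0), (0, 0, 255), (255, 255, 0), (255, 255, 255), (0, 0, 0)]
mask = build_mask(center, label, img_rgb.shape, targets)
seg_img = cv.bitwise_and(img_rgb, img_rgb, mask=mask)
With only K = 4 clusters, several targets may map to the same cluster, so increasing K or trimming the target list may be needed.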
Rather than K-means, I'd suggest a connected-components-based approach to find these. These signs and symbols have relatively uniform color areas that take up a fairly significant part of the image, which results in large connected components. You can later write downstream logic to select the relevant connected components to define your sign and create a segmented image from them (see the sketch after the code below).
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
colors = (np.array(plt.cm.tab10.colors) * 255).astype('int')
im1 = cv2.imread('37Gu6.png')
im2 = cv2.imread('dU47J.png')
for i in [im1, im2]:
    gray = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY)
    # Calculate the image value histogram to get an adaptive threshold value
    hist = np.histogram(gray.ravel(), bins=25)
    modeIdx = np.where(hist[0] == hist[0].max())[0][0]
    modeVal = hist[1][modeIdx]
    tVal = modeVal * 1.15  # Our threshold is 115% of the most common hist value
    _, thresh = cv2.threshold(gray, tVal, 255, cv2.THRESH_BINARY)
    # Find connected components
    output = cv2.connectedComponentsWithStats(thresh, 8, cv2.CV_32S)
    (numLabels, labels, stats, centroids) = output
    # The 5th column in the CC stats is the area
    # Find the indices of CCs with above-average area
    CCArea = stats[:, 4]
    aboveAvgLbls = np.where(CCArea > CCArea.mean())[0]
    # Generate labeled output image
    outputImg = np.zeros(i.shape, dtype="uint8")
    for l, c in zip(aboveAvgLbls, colors):
        w = np.where(labels == l)
        outputImg[w[0], w[1]] = c
    f, ax = plt.subplots(1, 4, figsize=(20, 5))
    for a, img, t in zip(ax, [i, gray, thresh, outputImg], ['RGB', 'Gray', f'Thresh [t={tVal}]', 'Labels']):
        a.imshow(img[:, :, ::-1] if img.shape[-1] == 3 else img, 'gray')
        a.set_title(t)
    plt.show()
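To go from the labelled components to an actual segmentation, one possible downstream step is sketched below. It reuses labels, aboveAvgLbls, and the input i from the last iteration of the loop above (or move these lines inside the loop to segment every image); treating label 0 as background is an assumption.
# keep only the selected components, excluding the background label 0
keepLbls = [l for l in aboveAvgLbls if l != 0]
segMask = np.isin(labels, keepLbls).astype('uint8') * 255

# black out everything outside those components
segmented = cv2.bitwise_and(i, i, mask=segMask)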
I'm new to deep learning and image processing, but I've implemented the following code for splitting Arabic characters. The code does not recognize individual characters; it only finds the whole word.
This is the whole word that is detected by the code:
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
%matplotlib inline
image = cv.imread('hamd.png')
height, width, depth = image.shape
print(image.shape)
image = cv.resize(image, dsize=(width*5, height*4), interpolation=cv.INTER_CUBIC)
print(image.shape)
gray = cv.cvtColor(image,cv.COLOR_BGR2GRAY)
ret,thresh = cv.threshold(gray,127,255,cv.THRESH_BINARY_INV)
kernel = np.ones((5,5), np.uint8)
img_dilation = cv.dilate(thresh, kernel, iterations=1)
gsblur=cv.GaussianBlur(img_dilation,(5,5),0)
ctrs, hier = cv.findContours(gsblur.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
m = list()
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv.boundingRect(ctr)[0])
pchl = list()
dp = image.copy()
for i, ctr in enumerate(sorted_ctrs):
    x, y, w, h = cv.boundingRect(ctr)
    cv.rectangle(dp, (x-10, y-10), (x + w + 10, y + h + 10), (90, 0, 255), 9)
plt.imshow(dp)
Now I am wondering how I can split the characters based on the distance between two characters. What I mean looks like the image below:
This is what I want.
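One possible direction, offered only as a hedged sketch (not from the original code): compute a column-wise ink profile of the binarized word (thresh and the resized image from the code above) and cut where the profile drops to almost zero, which roughly corresponds to gaps between characters. Connected Arabic script will not always have such gaps, so this is just a starting point:
# column-wise count of foreground pixels in the binarized word
col_sum = np.sum(thresh > 0, axis=0)

# columns with (almost) no ink are candidate cut positions
gap_cols = np.where(col_sum <= 1)[0]

# group consecutive gap columns and cut at the middle of each gap
cuts = []
if len(gap_cols) > 0:
    start = gap_cols[0]
    for prev, cur in zip(gap_cols, gap_cols[1:]):
        if cur != prev + 1:
            cuts.append((start + prev) // 2)
            start = cur
    cuts.append((start + gap_cols[-1]) // 2)

# slice the word image between successive cuts to get character candidates
bounds = [0] + cuts + [thresh.shape[1]]
pieces = [image[:, a:b] for a, b in zip(bounds, bounds[1:]) if b - a > 5]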
I searched for image recognition using Python. There seems to be no tutorial about extracting numbers from a colored background, so I followed THIS TUTORIAL.
import cv2
import matplotlib.pyplot as plt
def detect_edge(image):
    ''' function detecting edges '''
    image_with_edges = cv2.Canny(image, 100, 200)
    images = [image, image_with_edges]
    location = [121, 122]
    for loc, img in zip(location, images):
        plt.subplot(loc)
        plt.imshow(img, cmap='gray')
    plt.savefig('edge.png')
    plt.show()
image = cv2.imread('myscreenshot.png', 0)
detect_edge(image)
This is my image:
This is the result:
Any solution to print out these numbers?
Here is some code for getting clean Canny edges for this image.
import cv2
import numpy as np
# load image
img = cv2.imread("numbers.png");
# change to hue colorspace
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV);
h,s,v = cv2.split(hsv);
# use clahe to improve contrast
# (the contrast is pretty good already, so not much change, but good habit to have here)
clahe = cv2.createCLAHE(clipLimit = 10)
contrast = clahe.apply(v);
# use canny
canny = cv2.Canny(contrast, 20, 110);
# show
cv2.imshow('i', img);
cv2.imshow('v', v);
cv2.imshow('c', contrast);
cv2.imshow("canny", canny);
cv2.waitKey(0);
# save
cv2.imwrite("edges.png", canny);
Without using any OCR like pytesseract, I don't see an obvious way to consistently turn this image into "text" numbers. I'll leave that for someone else who might know how to solve it without any pattern-recognition tools, because I don't even know where to begin without them. If you're willing to drop that restriction, then pytesseract should have no problem with this, possibly even without preprocessing like this.
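For reference, a minimal sketch of that route (assuming pytesseract and the Tesseract binary are installed; "numbers.png" is the same input file as above):
import cv2
import pytesseract

img = cv2.imread("numbers.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# psm 6 treats the image as a single uniform block of text;
# restricting the character set to digits usually helps
text = pytesseract.image_to_string(gray, config="--psm 6 -c tessedit_char_whitelist=0123456789")
print(text)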
Ok, I filled in the numbers for the image. OpenCV's findContours hierarchy wasn't cooperating for some reason, so I had to handle it manually, which makes this code pretty janky. Honestly, if I were to try this again from scratch, I'd try to find the colors that contribute to a small number of total pixels, threshold on each, and combine the masks (a rough sketch of that idea follows the code below).
import cv2
import numpy as np
# check if small box is in big box
def contained(big, small):
    # big corners
    x,y,w,h = big;
    big_tl = [x, y];
    big_br = [x+w, y+h];
    # small corners
    x,y,w,h = small;
    small_tl = [x, y];
    small_br = [x+w, y+h];
    # check
    if small_tl[0] > big_tl[0] and small_br[0] < big_br[0]:
        if small_tl[1] > big_tl[1] and small_br[1] < big_br[1]:
            return True;
    return False;
# load image
img = cv2.imread("numbers.png");
# change to hue colorspace
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV);
h,s,v = cv2.split(hsv);
# use clahe to improve contrast
# (the contrast is pretty good already, so not much change, but good habit to have here)
clahe = cv2.createCLAHE(clipLimit = 10)
contrast = clahe.apply(v);
# rescale
scale = 2.0;
h, w = img.shape[:2];
h = int(h * scale);
w = int(w * scale);
contrast = cv2.resize(contrast, (w,h), cv2.INTER_LINEAR);
img = cv2.resize(img, (w,h), cv2.INTER_LINEAR);
# use canny
canny = cv2.Canny(contrast, 10, 60);
# show
cv2.imshow('i', img);
cv2.imshow('v', v);
cv2.imshow('c', contrast);
cv2.imshow("canny", canny);
cv2.waitKey(0);
# try to fill in contours
# contours
_, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE);
# filter contours by size
# filter out noisy bits and the big grid boxes
filtered = [];
for contour in contours:
    perimeter = cv2.arcLength(contour, True);
    if 50 < perimeter and perimeter < 750:
        filtered.append(contour);
# draw contours again
# create a mask of the contoured image
mask = np.zeros_like(contrast);
mask = cv2.drawContours(mask, filtered, -1, 255, -1);
# close to get rid of annoying little gaps
kernel = np.ones((3,3),np.uint8)
mask = cv2.dilate(mask,kernel,iterations = 1);
mask = cv2.erode(mask,kernel, iterations = 1);
# contours
_, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE);
# alright, hierarchy is being stupid, plan B
# SUUUUUPEEERRR JAAAANK
outer_cntrs = [a for a in range(len(contours))];
children = [];
for a in range(len(contours)):
    if a in outer_cntrs:
        # get current box
        big_box = cv2.boundingRect(contours[a]);
        # check against all other boxes
        for b in range(0, len(contours)):
            if b in outer_cntrs:
                small_box = cv2.boundingRect(contours[b]);
                # remove any children
                if contained(big_box, small_box):
                    outer_cntrs.remove(b);
                    children.append(contours[b]);
# select by hierarchy
top_cntrs = [];
for a in range(len(contours)):
    if a in outer_cntrs:
        top_cntrs.append(contours[a]);
# create a mask of the contoured image
mask = np.zeros_like(contrast);
mask = cv2.drawContours(mask, top_cntrs, -1, 255, -1);
mask = cv2.drawContours(mask, children, -1, 255, -1);
# close
kernel = np.ones((3,3),np.uint8)
mask = cv2.dilate(mask,kernel,iterations = 1);
mask = cv2.erode(mask,kernel, iterations = 1);
# do contours again because opencv is being super difficult
# honestly, at this point, a fill method would've been better
# contours
_, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE);
# fill in
for con in contours:
    cv2.fillPoly(mask, pts=[con], color=(255));
for con in children:
    cv2.fillPoly(mask, pts=[con], color=(0));
# resize back down
h, w = mask.shape;
h = int(h / scale);
w = int(w / scale);
mask = cv2.resize(mask, (w,h));
# show
cv2.imshow("mask", mask);
cv2.waitKey(0);
# save
cv2.imwrite("filled.png", mask);
You can find the digits in three steps:
Applying adaptive-threshold
Applying erosion
Reading with pytesseract
Adaptive-threshold result:
Here we see that 9 and 0 are different from the rest of the digits. We need to remove the boundaries of the 9.
Erosion result:
Pytesseract result:
8 | 1
5 9
4 #
3 | 3
6 | 1
There are multiple page-segmentation modes available for pytesseract.
If you want to remove | from the output, you can use re.sub:
text = re.sub('[^A-Za-z0-9]+', ',', text)
Result will be:
8
1
5
9
4
3
3
6
1
Code:
import cv2
import pytesseract
import re
import numpy as np
image = cv2.imread("7UUGYHw.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 13, 2)
erode = cv2.erode(thresh, np.array((7, 7)), iterations=1)
text = pytesseract.image_to_string(erode, config="--psm 6")
text = re.sub('[^A-Za-z0-9]+', '\n', text)
print(text)
I would like to change the background of this image to a white background in Python. I tried Canny edge detection, but it has a hard time finding the edges at the top of the product, as you can see in the second picture. I tried different thresholds, but that resulted in more of the background not being white. This is probably because the top of the product in the image has almost the same color as the background.
Is there a method to detect a small difference like that? I also tried it with a green screen behind the product, but because the product is reflective, it turns green.
Here is my code:
import cv2
import numpy as np
from skimage import filters
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['figure.dpi'] = 200
#== Parameters =======================================================================
BLUR = 15
CANNY_THRESH_1 = 10
CANNY_THRESH_2 = 255
MASK_DILATE_ITER = 10
MASK_ERODE_ITER = 10
MASK_COLOR = (1.0,1.0,1.0) # In BGR format
#== Processing =======================================================================
mypath = "./images"
images = [f for f in listdir(mypath) if isfile(join(mypath, f))]
#-- Read image -----------------------------------------------------------------------
for image in images:
    img_loc = mypath + "/" + image
    img = cv2.imread(img_loc)
    # threshold
    img_thresh = img
    thresh = 180
    img_thresh[img_thresh >= thresh] = 255
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #-- Edge detection -------------------------------------------------------------------
    edges = cv2.Canny(gray, CANNY_THRESH_1, CANNY_THRESH_2)
    edges = cv2.dilate(edges, None)
    edges = cv2.erode(edges, None)
    #-- Find contours in edges, sort by area ---------------------------------------------
    contour_info = []
    contours, _ = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    for c in contours:
        contour_info.append((
            c,
            cv2.isContourConvex(c),
            cv2.contourArea(c),
        ))
    contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)
    max_contour = contour_info[0]
    mask = np.zeros(edges.shape)
    cv2.fillConvexPoly(mask, max_contour[0], (255))
    #-- Smooth mask, then blur it --------------------------------------------------------
    mask = cv2.dilate(mask, None, iterations=MASK_DILATE_ITER)
    mask = cv2.erode(mask, None, iterations=MASK_ERODE_ITER)
    mask = cv2.GaussianBlur(mask, (BLUR, BLUR), 0)
    mask_stack = np.dstack([mask]*3)  # Create 3-channel alpha mask
    #-- Blend masked img into MASK_COLOR background --------------------------------------
    mask_stack = mask_stack.astype('float32') / 255.0  # Use float matrices,
    img = img.astype('float32') / 255.0                 # for easy blending
    masked = (mask_stack * img) + ((1-mask_stack) * MASK_COLOR)  # Blend
    masked = (masked * 255).astype('uint8')              # Convert back to 8-bit
    result_dir = "./results/" + image
    cv2.imwrite(result_dir, masked)  # Save
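Since edge detection struggles where the product and background colors are nearly identical, one hedged alternative worth trying (not part of the code above) is OpenCV's GrabCut, seeded with a rectangle just inside the image border. A minimal sketch, assuming the product roughly fills the frame and using a hypothetical input path:
import cv2
import numpy as np

img = cv2.imread("./images/example.jpg")  # hypothetical input file
mask = np.zeros(img.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)

# rectangle a few pixels inside the border as the initial foreground estimate
h, w = img.shape[:2]
rect = (10, 10, w - 20, h - 20)
cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)

# keep definite and probable foreground, paint the rest white
fg = np.where((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD), 1, 0).astype('uint8')
result = (img * fg[:, :, None] + 255 * (1 - fg[:, :, None])).astype('uint8')
cv2.imwrite("./results/grabcut.jpg", result)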
I am trying to build a verification system which can point out the differences between two scanned leaflets.
I blurred both images, found a homography to align them, and then took the difference between the two images to draw the rectangles.
Standard leaflet: https://ibb.co/GWjKW1x
Sample for checking: https://ibb.co/pv3Crxm
import cv2
import numpy as np
from skimage.measure import compare_ssim as ssim
from scipy import ndimage
#MAX_MATCHES = 1000000 #MSE for labels
MAX_MATCHES = 50000 #MSE for text
GOOD_MATCH_PERCENT = 0.4
def mse(imageA, imageB):
    # the 'Mean Squared Error' between the two images is the
    # sum of the squared difference between the two images;
    # NOTE: the two images must have the same dimension
    err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
    err /= float(imageA.shape[0] * imageA.shape[1])
    # return the MSE; the lower the error, the more "similar"
    # the two images are
    return err

def diff_remove_bg(img0, img, img1):
    d1 = diff(img0, img)
    d2 = diff(img, img1)
    return cv2.bitwise_and(d1, d2)
def alignImages(im1, im2):
    # Convert images to grayscale
    im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_MATCHES)
    keypoints1, descriptors1 = orb.detectAndCompute(im1Gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(im2Gray, None)
    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = matcher.match(descriptors1, descriptors2, None)
    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)
    # Remove not-so-good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]
    # Draw top matches
    imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)
    cv2.imwrite("matches.png", imMatches)
    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt
    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
    # Use homography
    height, width, channels = im2.shape
    im1Reg = cv2.warpPerspective(im1, h, (width, height))
    return im1Reg, h
# Read reference image
refFilename = "standard.png"
print("Reading reference image : ", refFilename)
imReference = cv2.imread(refFilename, cv2.IMREAD_COLOR)
imReference = ndimage.gaussian_filter(imReference, 2)
imFilename = "sample-fin.png"
print("Reading image to align : ", imFilename);
im = cv2.imread(imFilename, cv2.IMREAD_COLOR)
im = ndimage.gaussian_filter(im, 2)
print("Aligning images ...")
# The registered image will be stored in imReg.
# The estimated homography will be stored in h.
imReg, h = alignImages(im, imReference)
# Write aligned image to disk.
outFilename = "aligned.png"
print("Saving aligned image : ", outFilename);
cv2.imwrite(outFilename, imReg)
# Print estimated homography
print("Estimated homography : \n", h)
x1 = imReg
x2 = imReference
absdiff = cv2.absdiff(x1, x2)
print(np.max(absdiff))
#Threshold for minimum detection
threshold = absdiff < 30
absdiff[threshold] = 0
cv2.imwrite("absdiff.png", absdiff)
diff = cv2.subtract(x1, x2)
result = not np.any(diff)
########### Drawing Rectangles around the differences ##############
img = cv2.pyrDown(cv2.imread('absdiff.png', cv2.IMREAD_UNCHANGED))
img1 = cv2.pyrDown(cv2.imread('aligned.png', cv2.IMREAD_UNCHANGED))
ret, threshed_img = cv2.threshold(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
40, 40, cv2.THRESH_BINARY)
_,contours, hier = cv2.findContours(threshed_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(img1, (x-5, y-5), (x+w+10, y+h+10), (0, 255, 0), 2)
    # get the min area rect
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    # convert all floating point coordinates to int
    box = np.int0(box)
print(len(contours))
cv2.imwrite("cont-1.png", img1)
Difference image: https://ibb.co/NCj32qs
Pointed out errors: https://ibb.co/YWKxYLF
I want to improve the accuracy of this algorithm. Or is there some other way that could be used to solve such a task?
Edit:
I want to remove the extra rectangles that appear at the end of the leaflet. Also, the homography is not perfect; I had to put a huge threshold on the difference to get this result.
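One hedged refinement sketch, reusing the aligned images imReg and imReference and the otherwise unused ssim import from above: compute a structural-similarity map, threshold it with Otsu, and keep only difference regions above a minimum area, which may suppress the spurious rectangles near the edges. The min_area value is an assumption to be tuned.
# structural similarity between the aligned grayscale images
grayA = cv2.cvtColor(imReg, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imReference, cv2.COLOR_BGR2GRAY)
score, diff_map = ssim(grayA, grayB, full=True)
print("SSIM score:", score)
diff_map = (diff_map * 255).astype("uint8")

# low SSIM means a likely difference; Otsu picks the cut-off automatically
_, diff_thresh = cv2.threshold(diff_map, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)

# keep only reasonably large difference regions
min_area = 200  # assumed value, tune for the leaflet resolution
cnts = cv2.findContours(diff_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
annotated = imReg.copy()
for c in cnts:
    if cv2.contourArea(c) > min_area:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(annotated, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite("ssim-diff.png", annotated)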