I have a set of pictures (sample below) of codes in the same format, and I've tried everything but nothing works well.
I tried blurring, HSV conversion, thresholding, etc.
Can you help me out?
import pytesseract
import cv2
imgr = cv2.imread("a.png")
img = cv2.resize(imgr, (int(imgr.shape[1] * 3), int(imgr.shape[0] * 3)), interpolation=cv2.INTER_AREA)
img = cv2.blur(img, (7, 7))
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
cv2.imshow("", v)
cv2.waitKey(0)
p = pytesseract.image_to_string(v)
print(p)
thresh = cv2.threshold(v, 170, 255, cv2.THRESH_BINARY)[1]
cv2.imshow("", thresh)
cv2.waitKey(0)
print(pytesseract.image_to_string(thresh))
Below is a possible solution.
I felt that distortion was part of the problem, so I tried to "fix" that.
The result looks fine: the detection is successful.
Unfortunately, since you give only one sample, I have no way to tell whether this will work on the other ones (probably not...). Nevertheless, you can give it a try.
Best regards,
Stéphane
Note: I use tesseract-5.0.0-alpha with the tessdata_best dataset.
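A quick way to confirm which Tesseract build pytesseract is actually calling (version differences change results noticeably) is:
import pytesseract
# Print the version of the Tesseract binary that pytesseract will invoke
print(pytesseract.get_tesseract_version())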
Here is the output from the console:
Regression parameters for the second-degree polynomial:
[ 2.33735101e-04 -1.92211992e-01 2.43573673e+02]
=============================
Rectified image
RESULT: EG01-012R210126024
=============================
================================================
Test on the non rectified image
with the same blur, erode, threshold and
tesseract parameters
RESULT: EGO1-012R2101269
================================================
Press any key on an opened opencv window to close
And below is the program:
# Standard imports
import cv2
import numpy as np
from matplotlib import pyplot as plt
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'/usr/local/bin/tesseract'
# Read image
imgr = cv2.imread("a.png")
# Resizing, converting...
factor = 3
imgr = cv2.resize(imgr, (int(imgr.shape[1]*factor), int(imgr.shape[0]*factor)), interpolation=cv2.INTER_AREA)
# First detection in order to crop the image
# We want a detection. Not important if result is bad.
strings=pytesseract.image_to_data(imgr, lang = 'eng', config='--psm 11 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-')
strings=strings.split('\n')
for line in strings[2:]:
    s = line.split('\t')
    if len(s[11]) > 0:
        xmin = int(s[6])
        break
## We crop the image to keep the interesting part...
imgr=imgr[:,np.max([0,xmin-imgr.shape[1]//10]):,:]
cv2.imshow("Cropped image",imgr)
hsv = cv2.cvtColor(imgr, cv2.COLOR_BGR2HSV)
h0, s0, Im0 = cv2.split(hsv)
w=Im0.shape[1] # From now, this is the image we will work on.
h=Im0.shape[0]
# Blob image to compute the image distortion
blob=cv2.blur(Im0,(w//3,1))
blob=cv2.normalize(blob,None,0,255,cv2.NORM_MINMAX)
blob=cv2.threshold(blob,170,255,cv2.THRESH_BINARY)[1]
cv2.imshow("Blob image",blob)
x=[]
y=[]
for i in range(w):
    for j in range(h):
        if blob[j, i] == 0:
            x.append(i)
            y.append(j)
x=np.array(x)
y=np.array(y)
model = np.polyfit(x,y, 2)
print("Regression parameters for the second-degree polynomial: ")
print(model)
plt.plot(x,y,'x')
X=np.linspace(0,w)
plt.plot(X,X*X*model[0]+X*model[1]+model[2])
Ymean=np.mean(X*X*model[0]+X*model[1]+model[2])
# Remapping the cropped image with the found model parameters
map_x = np.zeros((Im0.shape[0], Im0.shape[1]), dtype=np.float32)
map_y = np.zeros((Im0.shape[0], Im0.shape[1]), dtype=np.float32)
for i in range(w):
    for j in range(h):
        map_x[j, i] = i
        map_y[j, i] = j + i*i*model[0] + i*model[1] + model[2] - Ymean
Im1=cv2.remap(Im0, map_x, map_y, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE)
# Actual detection on the rectified image: Im1
Im1=cv2.normalize(Im1,None,0,255,cv2.NORM_MINMAX)
blur_radius=8
threshold=120
Im1= cv2.blur(Im1, (blur_radius,blur_radius))
kernel = np.ones((4,4), np.uint8)
Im1 = 255 - cv2.erode(255 - Im1, kernel)
Im1=cv2.normalize(Im1,None,0,255,cv2.NORM_MINMAX)
Im1 = cv2.threshold(Im1, threshold, 255, cv2.THRESH_BINARY)[1]
cv2.imshow("Rectified image for text detection",Im1)
strings=pytesseract.image_to_string(Im1, lang = 'eng', config='--psm 11 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-')
strings=strings.split()
strings=max(strings,key=len)
print('=============================')
print("Rectified image")
print('RESULT: ',strings)
print('=============================')
# For comparison: detection on the non rectified image
# using the same parameters:
Im2 = Im0  # without remapping
Im2 = cv2.normalize(Im2,None,0,255,cv2.NORM_MINMAX)
Im2 = cv2.blur(Im2, (blur_radius,blur_radius))
Im2 = 255 - cv2.erode(255 - Im2, kernel)
Im2 = cv2.normalize(Im2,None,0,255,cv2.NORM_MINMAX)
Im2 = cv2.threshold(Im2, threshold, 255, cv2.THRESH_BINARY)[1]
strings=pytesseract.image_to_string(Im2, lang = 'eng', config='--psm 11 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-')
strings=strings.split()
strings=max(strings,key=len)
print('================================================')
print("Test on the non rectified image")
print("with the same blur, erode, threshold and")
print("tesseract parameters")
print('RESULT: ',strings)
print('================================================')
cv2.imshow("Unrectified image for text detection",Im2)
# Close windows
print("Press any key on an opened opencv window to close")
cv2.waitKey()
plt.close()
cv2.destroyAllWindows()
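Since you mention a whole set of pictures, the same pipeline could be wrapped in a function and looped over the folder to see how well it generalizes; a minimal sketch (the glob pattern and function name are assumptions):
import glob
def detect_code(path):
    # Hypothetical wrapper: everything above from the crop to the final
    # pytesseract.image_to_string call, returning the detected string.
    ...
for path in sorted(glob.glob('samples/*.png')):  # the pattern is an assumption
    print(path, detect_code(path))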
import cv2
import numpy as np
import pytesseract
from PIL import Image, ImageStat
# Load image
image = cv2.imread('a.png')
img=image.copy()
# Remove border
kernel_vertical = cv2.getStructuringElement(cv2.MORPH_RECT, (1,50))
temp1 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel_vertical)
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50,1))
temp2 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, horizontal_kernel)
temp3 = cv2.add(temp1, temp2)
result = cv2.add(temp3, image)
# Convert to grayscale and Otsu's threshold
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray,(5,5),0)
_,thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
kernel = np.ones((3,3), np.uint8)
dilated = cv2.dilate(thresh, kernel, iterations = 5)
# Find the biggest Contour (Where the words are)
contours, hierarchy = cv2.findContours(dilated,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
# Bubble-sort contours by point count, largest first
contours = list(contours)  # findContours may return a tuple; make it mutable
Reg = []
for j in range(len(contours)-1):
    for i in range(len(contours)-1):
        if len(contours[i+1]) > len(contours[i]):
            Reg = contours[i]
            contours[i] = contours[i+1]
            contours[i+1] = Reg
x, y, w, h = cv2.boundingRect(contours[0])
img_cut = np.zeros(shape=(h,w))
img_cut = gray[y:y+h, x:x+w]
img_cut = 255-img_cut
# Tesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
print(pytesseract.image_to_string(img_cut, lang = 'eng', config='--psm 7 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-'))
cv2.imwrite('generator.jpg',img_cut)
cv2.imshow('img', img_cut)
cv2.waitKey()
Tesseract recognition: EGO1-012R210124 (admittedly not perfect, but I tried my best.)
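Since the remaining misread is the classic O/0 confusion (EGO1 vs. EG01), and the codes appear to follow a fixed letters-then-digits pattern, a small post-processing pass can repair it; a sketch, assuming the format really is two letters and two digits before the dash:
def fix_code(s):
    # Assumed format: two letters, then two digits before the dash (e.g. EG01-...).
    # Repair the usual OCR confusions O->0 and I->1 in the digit positions only.
    head, rest = s[:4], s[4:]
    head = head[:2] + head[2:].replace('O', '0').replace('I', '1')
    return head + rest
print(fix_code('EGO1-012R210124'))  # -> EG01-012R210124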
First of all, this is my original image, in which I am trying to detect the defects (parallel lines) on a brushed aluminium surface.
Here are the steps I take:
Gaussian Blur
Dilate the image
Converting the image to grayscale
Morph Close Operation
Dilate again
Difference of the image
Canny Edge Detection
Finding the contours
Drawing a green line around the contours
Here is my code:
import numpy as np
import cv2
from matplotlib import pyplot as plt
import imutils
path = ''
path_output = ''
img_bgr = cv2.imread(path)
plt.imshow(img_bgr)
# bgr to rgb
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
plt.imshow(img_rgb)
# Converting to grayscale
img_just_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
# Displaying the grayscale image
plt.imshow(img_just_gray, cmap='gray')
# Gaussian Blur
ksize_w = 13
ksize_h = 13
img_first_gb = cv2.GaussianBlur(img_rgb, (ksize_w, ksize_h), 0, borderType=cv2.BORDER_REPLICATE)
plt.imshow(img_first_gb)
# Dilate the image
dilated_img = cv2.dilate(img_first_gb, np.ones((11,11), np.uint8))
plt.imshow(dilated_img)
# Converting to grayscale
img_gray_operated = cv2.cvtColor(dilated_img, cv2.COLOR_BGR2GRAY)
# Displaying the grayscale image
plt.imshow(img_gray_operated, cmap='gray')
# closing:
kernel_closing = np.ones((7,7),np.uint8)
img_closing = cv2.morphologyEx(img_gray_operated, cv2.MORPH_CLOSE, kernel_closing)
plt.imshow(img_closing, cmap='gray')
# dilation:
# add pixels to the boundaries of objects in an image
kernel_dilation = np.ones((3,3),np.uint8)
img_dilation2 = cv2.dilate(img_closing, kernel_dilation, iterations = 1)
plt.imshow(img_dilation2, cmap='gray')
diff_img = 255 - cv2.absdiff(img_just_gray, img_dilation2)
plt.imshow(diff_img, cmap='gray')
# canny
edgesToFindImage = img_dilation2
v = np.median(img_just_gray)
#print(v)
sigma = 0.33
lower_thresh = int(max(0,(1.0-sigma)*v))
higher_thresh = int(min(255,(1.0+sigma)*v))
img_edges = cv2.Canny(edgesToFindImage, lower_thresh, higher_thresh)
plt.imshow(img_edges, cmap='gray')
kernel_dilation2 = np.ones((2,2), np.uint8)
img_dilation2 = cv2.dilate(img_edges, kernel_dilation2, iterations=2)
plt.imshow(img_dilation2, cmap='gray')
# find contours
contoursToFindImage = img_dilation2
(_, cnts, _) = cv2.findContours(contoursToFindImage.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print(type(cnts))
print(len(cnts))
# -1 for all
cntsWhichOne = -1
# -1 for infill
# >0 for edge thickness
cntsInfillOrEdgeThickness = 3
img_drawing_contours_on_rgb_image = cv2.drawContours(img_rgb.copy(), cnts, cntsWhichOne, (0, 255, 0), cntsInfillOrEdgeThickness)
plt.imshow(img_drawing_contours_on_rgb_image)
and this is the result.
How can I improve this detection? Is there a more effective method to detect lines?
Here is one way in Python/OpenCV. You are close; you should use adaptive thresholding and morphology to clean up the small regions, and skip the Canny edges.
Input:
import cv2
import numpy as np
# load image
img = cv2.imread('scratches.jpg')
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# adaptive threshold
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, -35)
# apply morphology
kernel = np.ones((3,30),np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
kernel = np.ones((3,35),np.uint8)
morph = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)
# get hough line segments
threshold = 25
minLineLength = 10
maxLineGap = 20
# minLineLength and maxLineGap must be passed as keywords (the 5th positional argument of HoughLinesP is the optional 'lines' output)
lines = cv2.HoughLinesP(morph, 1, 30*np.pi/360, threshold, minLineLength=minLineLength, maxLineGap=maxLineGap)
# draw lines
linear1 = np.zeros_like(thresh)
linear2 = img.copy()
for [line] in lines:
    x1 = line[0]
    y1 = line[1]
    x2 = line[2]
    y2 = line[3]
    cv2.line(linear1, (x1,y1), (x2,y2), 255, 1)
    cv2.line(linear2, (x1,y1), (x2,y2), (0,0,255), 1)
print('number of lines:',len(lines))
# save resulting masked image
cv2.imwrite('scratches_thresh.jpg', thresh)
cv2.imwrite('scratches_morph.jpg', morph)
cv2.imwrite('scratches_lines1.jpg', linear1)
cv2.imwrite('scratches_lines2.jpg', linear2)
# display result
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("lines1", linear1)
cv2.imshow("lines2", linear2)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology cleaned image:
Lines on original image:
Lines on black background:
Pytesseract is unable to extract text when the texts are in different colors. I tried using OpenCV to invert the image, but that doesn't work for dark text colors.
The image:
import cv2
import pytesseract
from PIL import Image
def text(image):
    image = cv2.resize(image, (0, 0), fx=7, fy=7)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    cv2.imwrite("gray.png", gray)
    blur = cv2.GaussianBlur(gray, (3, 3), 0)
    cv2.imwrite("gray_blur.png", blur)
    thresh = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    cv2.imwrite("thresh.png", thresh)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
    cv2.imwrite("opening.png", opening)
    invert = 255 - opening
    cv2.imwrite("invert.png", invert)
    data = pytesseract.image_to_string(invert, lang="eng", config="--psm 7")
    return data
Is there a way to extract both texts from the given image: DEADLINE (red) and WHITE HOUSE (white)?
You can use ImageOps to invert the image, and then binarize it.
import pytesseract
from PIL import Image,ImageOps
import numpy as np
img = Image.open("OCR.png").convert("L")
img = ImageOps.invert(img)
# img.show()
threshold = 240
table = []
pixelArray = img.load()
for y in range(img.size[1]):  # binarize it
    List = []
    for x in range(img.size[0]):
        if pixelArray[x, y] < threshold:
            List.append(0)
        else:
            List.append(255)
    table.append(List)
img = Image.fromarray(np.array(table, dtype=np.uint8))  # load the image from the array (uint8 so PIL can infer the mode)
# img.show()
print(pytesseract.image_to_string(img))
The result:
The final image looks like this:
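As a side note, the per-pixel loop can be replaced by a single vectorized NumPy step using the same threshold; a minimal sketch of the same binarization:
import numpy as np
from PIL import Image, ImageOps
img = Image.open("OCR.png").convert("L")
img = ImageOps.invert(img)
arr = np.array(img)
# 0 below the threshold, 255 otherwise -- same rule as the loop above
img = Image.fromarray(np.where(arr < 240, 0, 255).astype(np.uint8))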
This is the image. I am trying to give proper shape to the images in my folder, but I am unable to get a perfect result. Following is one type of example:
Following is the code that I have written for my folder containing this type of image:
import cv2
import numpy as np
import glob
path = r'C:\Users\User\Desktop\A\*.jpg'
def k_function(image, k):
    z = image.reshape((-1, 4))
    z = np.float32(z)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, label, center = cv2.kmeans(z, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((image.shape))
    return res2
def noise_function(image):
    kernel = np.ones((2, 2), np.uint8)
    closing = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=2)
    bg = cv2.dilate(closing, kernel, iterations=1)
    dist_transform = cv2.distanceTransform(closing, cv2.DIST_L2, 0)
    ret, fg = cv2.threshold(dist_transform, 0.02 * dist_transform.max(), 255, 0)
    return fg
def filling(thresh):
    im_floodfill = thresh.copy()
    h, w = thresh.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(im_floodfill, mask, (60, 60), 255)
    im_floodfill_inv = cv2.bitwise_not(im_floodfill)
    n = thresh | im_floodfill_inv
    return n
for i, img in enumerate(glob.glob(path)):
    img1 = cv2.imread(img)
    n = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    b = k_function(n, 2)
    nm, thresh1 = cv2.threshold(b, 127, 255, cv2.THRESH_BINARY_INV)
    fill = filling(thresh1)
    noise = noise_function(fill)
    cv2.imwrite(r'C:\Users\User\Desktop\New folder\image{}.jpg'.format(i), noise)
Try using copyMakeBorder to make a border. It looks like you're trying to use floodFill, and I've never figured out how that is supposed to work.
import cv2
image = cv2.imread('elbow.png')
image = cv2.copyMakeBorder(image, 10, 0, 0, 10, cv2.BORDER_CONSTANT)  # top, bottom, left, right
cv2.imwrite('elbow_border.png', image)
elbow.png:
elbow_border.png:
I would approach it a bit differently in Python/OpenCV. I would convert to HSV and threshold the saturation channel, then use morphology open to smooth the outline.
Input (cropped from your post):
import cv2
# load image as HSV and select saturation
img = cv2.imread("finger.png")
sat = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:,:,1]
# threshold the saturation channel
ret, thresh = cv2.threshold(sat,25,255,0)
# apply morphology open to smooth the outline
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (19,19))
smoothed = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
# write result to disk
cv2.imwrite("finger_smoothed.png", smoothed)
cv2.imshow("SAT", sat)
cv2.imshow("THRESH", thresh)
cv2.imshow("SMOOTHED", smoothed)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
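If the fixed threshold of 25 does not carry over to your other images, Otsu's method can pick the saturation threshold automatically; a minimal sketch of that one change:
import cv2
img = cv2.imread("finger.png")
sat = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 1]
# Let Otsu choose the threshold instead of hard-coding 25
ret, thresh = cv2.threshold(sat, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print("Otsu picked threshold:", ret)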
I have an image.
After my code runs, the new image is:
I need to find the line between them, like this:
How do I do that?
My code:
import numpy as np
import cv2
import cv2 as cv
ima = cv2.imread('track1.pNg')
imgray = cv2.cvtColor(ima,cv2.COLOR_BGR2GRAY)
im = cv2.cvtColor(ima,cv2.COLOR_BGR2GRAY)
imm = cv2.inRange(im,(0),(49))
kernel = np.ones((5,5),np.uint8)
gradient = cv2.morphologyEx(imm, cv2.MORPH_GRADIENT, kernel)
il = cv2.dilate(gradient, kernel, iterations=7)
ol = cv2.erode(il, kernel, iterations=7)
contours,hei = cv2.findContours(ol,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
img = cv2.drawContours(ima, contours, -1, (200,255,0), 3)
cv2.imshow('window',ima)
How can I achieve this?
This answer explains how to find a line that runs between two sides of a shape. The center can be found by iteratively eroding the image.
This is the result:
This is the code I used:
import cv2
import numpy as np
img = 255-cv2.imread('/home/stephen/Desktop/PITqe.png',0)
kernel = np.ones((20,20), np.uint8)
img = cv2.erode(img, kernel, iterations=2)
size = np.size(img)
skel = np.zeros(img.shape,np.uint8)
ret,img = cv2.threshold(img,127,255,0)
element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
done = False
while not done:
    eroded = cv2.erode(img, element)
    temp = cv2.dilate(eroded, element)
    temp = cv2.subtract(img, temp)
    skel = cv2.bitwise_or(skel, temp)
    img = eroded.copy()
    zeros = size - cv2.countNonZero(img)
    cv2.imshow('img', img)
    cv2.waitKey(100)
    if zeros == size:
        done = True
cv2.imshow("img",skel)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here is another way to skeletonize in OpenCV (without explicitly iterating) by using distance transform and top hat morphology.
Input:
import cv2
import numpy as np
# read image and invert so blob is white on black background
img = 255-cv2.imread('tall_blob.png',0)
# do some eroding of img, but not too much
kernel = np.ones((20,20), np.uint8)
img = cv2.erode(img, kernel, iterations=2)
# threshold img
ret, thresh = cv2.threshold(img,127,255,0)
# do distance transform
dist = cv2.distanceTransform(thresh, distanceType=cv2.DIST_L2, maskSize=5)
# set up cross for tophat skeletonization
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
skeleton = cv2.morphologyEx(dist, cv2.MORPH_TOPHAT, kernel)
# threshold skeleton
ret, skeleton = cv2.threshold(skeleton,0,255,0)
# display skeleton
cv2.imshow("skeleton",skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save results
cv2.imwrite('tall_blob_skeleton.png', skeleton)
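One caveat: the top-hat of the distance transform is a float image, so if later steps expect an 8-bit mask (an assumption about your pipeline), convert it first:
# Rescale the float skeleton to 0-255 and convert to uint8
skeleton8 = cv2.normalize(skeleton, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)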
I have an invoice image, and I want to detect the text on it. So I plan to use two steps: first to identify the text areas, and then to use OCR to recognize the text.
I am using OpenCV 3.0 in Python for that. I am able to identify the text (including some non-text areas), but I further want to identify text boxes from the image (also excluding the non-text areas).
My input image is:
And the output is:
I am using the below code for this:
img = cv2.imread('/home/mis/Text_Recognition/bill.jpg')
mser = cv2.MSER_create()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #Converting to GrayScale
gray_img = img.copy()
regions = mser.detectRegions(gray, None)
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
cv2.polylines(gray_img, hulls, 1, (0, 0, 255), 2)
cv2.imwrite('/home/mis/Text_Recognition/amit.jpg', gray_img) #Saving
Now, I want to identify the text boxes and remove/ignore any non-text areas on the invoice. I am new to OpenCV and a beginner in Python. I am able to find some examples in MATLAB and C++, but converting them to Python would take me a lot of time.
Is there any example with Python using OpenCV, or can anyone help me with this?
Below is the code
# Import packages
import cv2
import numpy as np
#Create MSER object
mser = cv2.MSER_create()
#Your image path i-e receipt path
img = cv2.imread('/home/rafiullah/PycharmProjects/python-ocr-master/receipts/73.jpg')
#Convert to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
vis = img.copy()
#detect regions in gray scale image
regions, _ = mser.detectRegions(gray)
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
cv2.polylines(vis, hulls, 1, (0, 255, 0))
cv2.imshow('img', vis)
cv2.waitKey(0)
mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
for contour in hulls:
    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)
#this is used to find only text regions, remaining are ignored
text_only = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow("text only", text_only)
cv2.waitKey(0)
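To also drop the non-text areas the question mentions, one option is to filter the hulls by size and aspect ratio before drawing them into the mask; a sketch (the bounds are assumptions to tune per document):
filtered = []
for hull in hulls:
    x, y, w, h = cv2.boundingRect(hull)
    # Keep regions that plausibly contain text: not tiny, not extremely tall and thin
    if w > 10 and 5 < h < 100 and w / float(h) > 0.3:
        filtered.append(hull)
mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
for hull in filtered:
    cv2.drawContours(mask, [hull], -1, (255, 255, 255), -1)
text_only = cv2.bitwise_and(img, img, mask=mask)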
This is an old post, but I'd like to contribute: if you are trying to extract all the text out of an image, here is the code to get that text in an array.
import cv2
import numpy as np
import re
import pytesseract
from pytesseract import image_to_string
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
from PIL import Image
image_obj = Image.open("screenshot.png")
rgb = cv2.imread('screenshot.png')
small = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
#threshold the image
_, bw = cv2.threshold(small, 0.0, 255.0, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
# get horizontal mask of large size since text are horizontal components
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 1))
connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
# find all the contours
contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#Segment the text lines
counter=0
array_of_texts=[]
for idx in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[idx])
    cropped_image = image_obj.crop((x - 10, y, x + w + 10, y + h))
    str_store = re.sub(r'([^\s\w]|_)+', '', image_to_string(cropped_image))
    array_of_texts.append(str_store)
    counter += 1
print(array_of_texts)