Generate noiseless binary image and skeleton from image - python

I have an image of overlapping filaments. I am interested in generating a noiseless binary image and subsequently using it for skeleton generation. I have tried different ways to get the skeleton but have not succeeded. Below is the code written in Python for this, along with the skeleton image it generates. It would be great if anyone could help in solving the issue.
Original vs Skeleton image:
import cv2
import numpy as np
from skimage import morphology, graph
from skan import Skeleton
from skimage.morphology import skeletonize
import matplotlib.pyplot as plt
img00 = cv2.imread(r'img_test.jpg')
img01 = cv2.cvtColor(img00, cv2.COLOR_BGR2GRAY)
cv2.imshow('1',img01)
img02 = cv2.adaptiveThreshold(img01,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,5,5)
cv2.imshow('2',img02)
i_size = min(np.size(img02,1),600) # image size for imshow
kernel = np.ones((2, 2), np.uint8) # Creating kernel
# Using cv2.erode() method
img_erosion = cv2.erode(img02, kernel, borderType = cv2.BORDER_REFLECT, iterations=1, borderValue = 1)
filterSize =(5,5)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, filterSize)
tophat_img = cv2.morphologyEx(img_erosion, cv2.MORPH_BLACKHAT, kernel)
img03 = cv2.bitwise_not(tophat_img)
cv2.imshow('3',img03)
kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(2,2))
img04 = cv2.morphologyEx(img03, cv2.MORPH_CLOSE, kernel1)
img04 = cv2.morphologyEx(img04, cv2.MORPH_OPEN, kernel1)
cv2.imshow('4',img04)
thresh = (img04/255).astype(np.uint8)
# skeleton based on default method
skeleton1 = skeletonize(thresh)
skeleton2 = (skeleton1*255).astype(np.uint8)
cv2.imshow('5',skeleton2)
# Avg diameter calculation
diameter = np.sum(thresh)/np.sum(skeleton1)
print('diameter',diameter)
cv2.waitKey(0)  # wait for a key press before closing the windows
cv2.destroyAllWindows()

Straight binarization followed by morphological closing can give interesting results, though they are sensitive to parameters.
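For illustration, here is a minimal sketch of that idea (global Otsu binarization, a closing to bridge small gaps in the filaments, then skeletonization). The kernel size and the assumption that the filaments are darker than the background are guesses that would need tuning on the actual image:
import cv2
import numpy as np
from skimage.morphology import skeletonize

# read the image as grayscale (file name taken from the question)
gray = cv2.imread('img_test.jpg', cv2.IMREAD_GRAYSCALE)

# straight binarization with Otsu; THRESH_BINARY_INV assumes dark filaments on a bright background
ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

# morphological closing to fill small holes and gaps inside the filaments (kernel size is a guess)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)

# skeletonize the boolean mask and convert back to an 8-bit image for saving/display
skeleton = skeletonize(closed > 0)
cv2.imwrite('skeleton.png', (skeleton * 255).astype(np.uint8))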

Related

python OCR recognize image into text

It seems the resolution of the image affects whether the output is successful or not.
Usually the image resolution/quality from the production line is like test image 1. Instead of changing the camera, is there any way to make the success rate higher, e.g. improving the code or adding some simple AI to help with detection? I need a hand, thanks.
Here is the demo .py code I found in a tutorial:
from PIL import Image
import pytesseract
img = Image.open('new_003.png')
text = pytesseract.image_to_string(img, lang='eng')
print("size")
print(img.size)
print(text)
(pic) test image 1: https://ibb.co/VLsM9LL
size
(122, 119)
# the output is:
R carac7
(pic) test image 2: https://ibb.co/XyRcf45
size
(329, 249)
# the output is:
R1 oun,
2A
R ca7ac2
(pic) test image 3: https://ibb.co/fNtDRc7
this one is just for testing, but it is the only one that is 100% correct
size
(640, 640)
# the output is:
BREAKING THE STATUE
i have always known
i just didn't understand
the inner conflictions
arresting our hands
gravitating close enough
expansive distamce between
i couldn't give you more
but i meant everything
when the day comes
you find your heart
wants something more
than a viece and a part
your life will change
like astatue set free
to walk among us
to created estiny
we didn't break any rules
we didn't make mistakes
making beauty in loving
making lovine for days
SHILOW
I tried to find out/prove whether the only solution is higher image resolution, or whether there is an alternative way to solve this issue.
I tried dilation and erosion on the image, hoping to get a clearer image for OCR to recognize, like the linked demo pic https://ibb.co/3pDgDnF
import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
from IPython.display import clear_output
def show_img(img, bigger=False):
    if bigger:
        plt.figure(figsize=(15, 15))
    image_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(image_rgb)
    plt.show()

def sharpen(img, sigma=100):
    # sigma = 5, 15, 25
    blur_img = cv2.GaussianBlur(img, (0, 0), sigma)
    usm = cv2.addWeighted(img, 1.5, blur_img, -0.5, 0)
    return usm

def img_processing(img):
    # do something here
    img = sharpen(img)
    return img
img = cv2.imread("/home/joy/桌面/all_pic_OCR/simple_pic/03.png")
cv2.imshow('03', img) # Original image
img2 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 11))
img = cv2.dilate(img, kernel) # tried Dilation
cv2.imshow('image_after_Dilation', img) # image after Dilation
img = cv2.erode(img, kernel) # tried Erosion
cv2.imshow('then_Erosion', img) # image after Erosion
cv2.waitKey(0)
cv2.destroyAllWindows()
result: https://ibb.co/TbZjg3d
So I am still trying to achieve Python OCR that recognizes the image text with 99.9999% accuracy.
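One low-effort change that often helps with small images like test image 1 is to upscale and binarize before handing the image to Tesseract. A minimal sketch (not from the thread), assuming pytesseract; the scale factor, threshold method, and --psm value are guesses that would need tuning:
import cv2
import pytesseract

# read the small production-line image in grayscale (file name from the question)
img = cv2.imread('new_003.png', cv2.IMREAD_GRAYSCALE)

# upscale: Tesseract tends to do better when characters are roughly 20-30 px tall
scale = 4  # guess; tune per camera
img = cv2.resize(img, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)

# light denoising, then Otsu binarization so the text is high-contrast
img = cv2.GaussianBlur(img, (3, 3), 0)
ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# --psm 6 assumes a single uniform block of text; a character whitelist can also help
text = pytesseract.image_to_string(img, lang='eng', config='--psm 6')
print(text)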

Get mask of image without using OpenCV

I'm trying the following to get the mask out of this image, but unfortunately I fail.
import numpy as np
import skimage.color
import skimage.filters
import skimage.io
# get filename, sigma, and threshold value from command line
filename = 'pathToImage'
# read and display the original image
image = skimage.io.imread(fname=filename)
skimage.io.imshow(image)
# blur and grayscale before thresholding
blur = skimage.color.rgb2gray(image)
blur = skimage.filters.gaussian(blur, sigma=2)
# perform inverse binary thresholding
mask = blur < 0.8
# use the mask to select the "interesting" part of the image
sel = np.ones_like(image)
sel[mask] = image[mask]
# display the result
skimage.io.imshow(sel)
How can I obtain the mask?
Is there a general approach that would work for this image as well, without custom fine-tuning and changing parameters?
Apply high contrast (maximum possible value).
Convert to a black & white image using a high threshold (I've used 250).
Apply a min filter (value = 8).
Apply a max filter (value = 8).
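A minimal sketch of those four steps (my own interpretation, not the original poster's code): the 250 threshold and the filter size of 8 come from the list above, while the contrast stretch via rescale_intensity and the scipy min/max filters are my own choices, assuming the object of interest ends up bright after the contrast step:
import numpy as np
import scipy.ndimage as ndi
import skimage.color
import skimage.exposure
import skimage.io

image = skimage.io.imread('pathToImage')

# 1. apply high contrast: stretch the grayscale intensities to the full 0-255 range
gray = skimage.color.rgb2gray(image)
gray = skimage.exposure.rescale_intensity(gray, out_range=(0, 255)).astype(np.uint8)

# 2. convert to black & white using a high threshold (250)
bw = gray >= 250

# 3. min filter (size 8): shrinks white regions and removes small bright noise
bw = ndi.minimum_filter(bw, size=8)

# 4. max filter (size 8): grows the surviving regions back to roughly their original extent
mask = ndi.maximum_filter(bw, size=8)

# use the mask the same way as in the question
sel = np.ones_like(image)
sel[mask] = image[mask]
skimage.io.imshow(sel)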
Here is how you can get a rough mask using only the skimage library methods:
import numpy as np
from skimage.io import imread, imsave
from skimage.feature import canny
from skimage.color import rgb2gray
from skimage.filters import gaussian
from skimage.morphology import dilation, erosion, selem
from skimage.measure import find_contours
from skimage.draw import polygon
def get_mask(img):
    kernel = selem.rectangle(7, 6)
    dilate = dilation(canny(rgb2gray(img), 0), kernel)
    dilate = dilation(dilate, kernel)
    dilate = dilation(dilate, kernel)
    erode = erosion(dilate, kernel)
    mask = np.zeros_like(erode)
    rr, cc = polygon(*find_contours(erode)[0].T)
    mask[rr, cc] = 1
    return gaussian(mask, 7) > 0.74

def save_img_masked(file):
    img = imread(file)[..., :3]
    mask = get_mask(img)
    result = np.zeros_like(img)
    result[mask] = img[mask]
    imsave("masked_" + file, result)
save_img_masked('belt.png')
save_img_masked('bottle.jpg')
Resulting masked_belt.png:
Resulting masked_bottle.jpg:
One approach uses the fact that the background changes color only very slowly. Here I apply the gradient magnitude to each of the channels and compute the norm of the result, giving me an image highlighting the quicker changes in color. The watershed of this (with sufficient tolerance) should have one or more regions covering the background and touching the image edge. After identifying those regions, and doing a bit of cleanup we get these results (red line is the edge of the mask, overlaid on the input image):
I did have to adjust the tolerance: with a lower tolerance in the first case, more of the shadow is seen as object. I think it should be possible to set the tolerance based on the statistics of the gradient image, but I have not tried.
There are no other parameters to tweak here; the minimum object area, 300, is quite safe. An alternative would be to keep only the single largest object (a small sketch of that variant is given after the code below).
This is the code, using DIPlib (disclaimer: I'm an author). out is the mask image, not the outline as displayed above.
import diplib as dip
import numpy as np
# Case 1:
img = dip.ImageRead('Pa9DO.png')
img = img[362:915, 45:877] # cut out actual image
img = img(slice(0,2)) # remove alpha channel
tol = 7
# Case 2:
#img = dip.ImageRead('jTnVr.jpg')
#tol = 1
# Compute gradient
gm = dip.Norm(dip.GradientMagnitude(img))
# Compute watershed with tolerance
lab = dip.Watershed(gm, connectivity=1, maxDepth=tol, flags={'correct','labels'})
# Identify regions touching the image edge
ll = np.unique(np.concatenate((
np.unique(lab[:,0]),
np.unique(lab[:,-1]),
np.unique(lab[0,:]),
np.unique(lab[-1,:]))))
# Remove regions touching the image edge
out = dip.Image(lab.Sizes(), dt='BIN')
out.Fill(1)
for l in ll:
    if l != 0:  # label zero is for the watershed lines
        out = out - (lab == l)
# Remove watershed lines
out = dip.Opening(out, dip.SE(3, 'rectangular'))
# Remove small regions
out = dip.AreaOpening(out, filterSize=300)
# Display
dip.Overlay(img, dip.Dilation(out, 3) - out).Show()
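As an aside (not part of the original answer), the "keep only the largest object" alternative mentioned above is straightforward with scikit-image; a minimal sketch, written for a boolean mask such as the one produced above:
import numpy as np
from skimage.measure import label, regionprops

def keep_largest_object(mask):
    # label the connected components of the boolean mask
    labeled = label(mask)
    regions = regionprops(labeled)
    if not regions:
        return mask  # nothing to keep
    # keep only the component with the largest area
    largest = max(regions, key=lambda r: r.area)
    return labeled == largest.label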

Measuring feret diameter of microscopic particles using Python OpenCV

I am trying to measure the feret diameter of microscopic particles deposited onto glass using Python OpenCV2. Presently, I have close to 150 images for which this process needs to be automated. For the measurement, I have written a Python script, given below:
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, color, measure
##step-1 reading the image
img = cv2.imread('1.tif', 0)
pixel_2_micron = 1.75 #1 pixel is equal to 1.75 microns
#img = color.rgb2gray(io.imread('1.tif', 0))
##step-2 selecting required region if necessary
cropped_img = img[0:1422,:]
#plt.hist(img.flat, bins=100, range=(0,255))
ret, thresh = cv2.threshold(cropped_img, 162, 217, cv2.THRESH_BINARY)
#Step-3
kernel = np.ones((3,3),np.uint8)
eroded = cv2.erode(thresh, kernel, iterations = 1)
dilated = cv2.dilate(eroded, kernel, iterations = 1)
#cv2.imshow("Original Image", img)
#cv2.imshow("Threshold Image", thresh)
#cv2.imshow("Eroded Image", eroded)
#cv2.imshow("Dilated Image", dilated)
#cv2.waitKey(0)
#step-4
mask = thresh == 217
io.imshow(mask) #show the masked image
Please assist me in measuring the dimensions of the masked regions, especially the feret diameter for each of the masked regions.
I have attached the image with the particles masked.
I have just released a Python module to calculate the feret diameter of binary images, which should solve your problem.
https://pypi.org/project/feret/
At the moment it can't handle images with more than one region, but as described above you can use skimage to find the connected regions and then take the maximum and minimum extents of each region to cut it out of the image. If you need help, tell me.
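As an aside (not from the answer above): recent versions of scikit-image also expose the maximum Feret diameter directly through regionprops, so all particles can be measured in one pass. A minimal sketch, assuming scikit-image >= 0.18 and reusing the threshold value 162 from the question's script:
import cv2
from skimage import measure

pixel_2_micron = 1.75  # 1 pixel is equal to 1.75 microns

img = cv2.imread('1.tif', 0)
cropped_img = img[0:1422, :]

# binarize (threshold value taken from the question's script)
ret, thresh = cv2.threshold(cropped_img, 162, 255, cv2.THRESH_BINARY)

# label the connected particles and measure each one
labels = measure.label(thresh > 0)
for region in measure.regionprops(labels):
    feret_px = region.feret_diameter_max  # requires scikit-image >= 0.18
    print(f'particle {region.label}: max Feret diameter = {feret_px * pixel_2_micron:.2f} microns')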

MRI (brain tumor) image processing and segmentation, skull removing

I need help with image segmentation. I have an MRI image of a brain with a tumor. I need to remove the cranium (skull) from the MRI and then segment only the tumor object. How could I do that in Python with image processing? I have tried making contours, but I don't know how to find and remove the largest contour and get only the brain without the skull.
Thanks a lot.
import numpy as np
from sklearn.cluster import KMeans
from skimage import morphology

def get_brain(img):
    row_size = img.shape[0]
    col_size = img.shape[1]
    mean = np.mean(img)
    std = np.std(img)
    img = img - mean
    img = img / std
    middle = img[int(col_size / 5):int(col_size / 5 * 4), int(row_size / 5):int(row_size / 5 * 4)]
    mean = np.mean(middle)
    max = np.max(img)
    min = np.min(img)
    img[img == max] = mean
    img[img == min] = mean
    kmeans = KMeans(n_clusters=2).fit(np.reshape(middle, [np.prod(middle.shape), 1]))
    centers = sorted(kmeans.cluster_centers_.flatten())
    threshold = np.mean(centers)
    thresh_img = np.where(img < threshold, 1.0, 0.0)  # threshold the image
    eroded = morphology.erosion(thresh_img, np.ones([3, 3]))
    dilation = morphology.dilation(eroded, np.ones([5, 5]))
    return dilation  # rough brain mask
These images are similar to the ones I'm looking at:
Thanks for answers.
Preliminaries
Some preliminary code:
%matplotlib inline
import numpy as np
import cv2
from matplotlib import pyplot as plt
from skimage.morphology import extrema
from skimage.morphology import watershed as skwater
def ShowImage(title, img, ctype):
    plt.figure(figsize=(10, 10))
    if ctype == 'bgr':
        b, g, r = cv2.split(img)        # get b, g, r
        rgb_img = cv2.merge([r, g, b])  # switch it to rgb
        plt.imshow(rgb_img)
    elif ctype == 'hsv':
        rgb = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
        plt.imshow(rgb)
    elif ctype == 'gray':
        plt.imshow(img, cmap='gray')
    elif ctype == 'rgb':
        plt.imshow(img)
    else:
        raise Exception("Unknown colour type")
    plt.axis('off')
    plt.title(title)
    plt.show()
For reference, here's one of the brain+skulls you linked to:
#Read in image
img = cv2.imread('brain.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ShowImage('Brain with Skull',gray,'gray')
Extracting a Mask
If the pixels in the image can be classified into two different intensity classes, that is, if they have a bimodal histogram, then Otsu's method can be used to threshold them into a binary mask. Let's check that assumption.
#Make a histogram of the intensities in the grayscale image
plt.hist(gray.ravel(),256)
plt.show()
Okay, the data is nicely bimodal. Let's apply the threshold and see how we do.
#Threshold the image to binary using Otsu's method
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_OTSU)
ShowImage('Applying Otsu',thresh,'gray')
Things are easier to see if we overlay our mask onto the original image:
colormask = np.zeros(img.shape, dtype=np.uint8)
colormask[thresh!=0] = np.array((0,0,255))
blended = cv2.addWeighted(img,0.7,colormask,0.1,0)
ShowImage('Blended', blended, 'bgr')
Extracting the Brain
The overlap of the brain (shown in red) with the mask is good enough that we can move straight on to extracting the brain. To do so, let's extract the connected components and find the largest one, which will be the brain.
ret, markers = cv2.connectedComponents(thresh)
#Get the area taken by each component. Ignore label 0 since this is the background.
marker_area = [np.sum(markers==m) for m in range(1, np.max(markers)+1)]
#Get label of largest component by area
largest_component = np.argmax(marker_area)+1 #Add 1 since labels start at 1 (label 0 is the background)
#Get pixels which correspond to the brain
brain_mask = markers==largest_component
brain_out = img.copy()
#In a copy of the original image, clear those pixels that don't correspond to the brain
brain_out[brain_mask==False] = (0,0,0)
ShowImage('Connected Components',brain_out,'rgb')
Considering the Second Brain
Running this again with your second image produces a mask with many holes:
We can close many of these holes using a closing transformation:
brain_mask = np.uint8(brain_mask)
kernel = np.ones((8,8),np.uint8)
closing = cv2.morphologyEx(brain_mask, cv2.MORPH_CLOSE, kernel)
ShowImage('Closing', closing, 'gray')
We can now extract the brain:
brain_out = img.copy()
#In a copy of the original image, clear those pixels that don't correspond to the brain
brain_out[closing==False] = (0,0,0)
ShowImage('Connected Components',brain_out,'rgb')
If you need to cite this for some reason:
Richard Barnes. (2018). Using Otsu's method for skull-brain segmentation (v1.0.1). Zenodo. https://doi.org/10.5281/zenodo.6042312
Have you perhaps tried running python skull_stripping.py?
You can modify the parameters, but it normally works well.
There are some new studies using deep learning for skull stripping which I found interesting:
https://github.com/mateuszbuda/brain-segmentation/tree/master/skull-stripping
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 17:10:56 2021
#author: K Somasundaram, ka.somasundaram#gmail.com
"""
import numpy as npy
from skimage.filters import threshold_otsu
from skimage import measure
# import image reading module image from matplotlib
import matplotlib.image as img
#import image ploting module pyplot from matplotlib
import matplotlib.pyplot as plt
inim=img.imread('015.bmp')
#Find the dimension of the input image
dimn=inim.shape
print('dim=',dimn)
plt.figure(1)
plt.imshow(inim)
#-----------------------------------------------
# Find a threshold for the image using Otsu method in filters
th=threshold_otsu(inim)
print('Threshold = ',th)
# Binarize using threshold th
binim1=inim>th
plt.figure(2)
plt.imshow(binim1)
#--------------------------------------------------
# Erode the binary image with a structuring element
from skimage.morphology import disk
import skimage.morphology as morph
#Erode it with a disk of radius 3
eroded_image=morph.erosion(binim1,disk(3))
plt.figure(3)
plt.imshow(eroded_image)
#---------------------------------------------
#------------------------------------------------
# label the binary image
labelimg=measure.label(eroded_image,background=0)
plt.figure(4)
plt.imshow(labelimg)
#--------------------------------------------------
# Find the area of the connected regions
prop=measure.regionprops(labelimg)
# Find the number of objects in the image
ncount=len(prop)
print ( 'Number of regions=',ncount)
#-----------------------------------------------------
# Find the LCC (largest connected component) index
argmax=0
maxarea=0
#Find the largest connected region
for i in range(ncount):
    if(prop[i].area > maxarea):
        maxarea = prop[i].area
        argmax = i
print('max area=',maxarea,'arg max=',argmax)
print('values=',[region.area for region in prop])
# Take only the largest connected region
# Generate a mask of the size of the input image with all zeros
bmask=npy.zeros(inim.shape,dtype=npy.uint8)
# Set all pixels belonging to the LCC (label argmax+1) to 1
bmask[labelimg == (argmax+1)] =1
plt.figure(5)
plt.imshow(bmask)
#------------------------------------------------
#Dilate the isolated region to recover the pixels lost in erosion
dilated_mask=morph.dilation(bmask,disk(6))
plt.figure(6)
plt.imshow(dilated_mask)
#---------------------------------------
# Extract the brain using the brain mask
brain=inim*dilated_mask
plt.figure(7)
plt.imshow(brain)
Input Image:

How to implement imbinarize in OpenCV

I developed a script in Matlab which analyses engraved text on coloured steel. I'm using a range of morphological techniques to extract the text and read it with OCR. I need to implement it on a Raspberry Pi, so I decided to port my Matlab code to OpenCV (in Python). I tried to transfer some methods and they work similarly, but how do I implement imreconstruct and imbinarize (shown below) in OpenCV? (The challenge here is to appropriately differentiate foreground and background.)
Maybe I should try adding grabCut, getStructuringElement, morphologyEx or dilate? I tried them in a range of combinations but have not found a perfect solution.
I will put the whole script for both; if anyone could give me suggestions on how to generally improve this extraction and the accuracy of the OCR process, I would greatly appreciate it.
Based on the bin values of the grey-scale image, I change some parameters in these functions:
Matlab:
se = strel('disk', 300);
img = imtophat(img, se);
maker = imerode(img, strel('line',100,0)); %for whiter ones
maker = imerode(img, strel('line',85,0)); %for medium
maker = imerode(img, strel('line',5,0));
imgClear = imreconstruct(maker, img);
imgBlur = imgaussfilt(imgClear,1); %less blur for whiter frames
BW = imbinarize(imgBlur,'adaptive','ForegroundPolarity','Bright',...
'Sensitivity',0.7); %process for medium
BW = imbinarize(imgBlur, 'adaptive', 'ForegroundPolarity',...
'Dark', 'Sensitivity', 0.4); % process for black and white
res = ocr(BW, 'CharacterSet', '0123456789', 'TextLayout', 'Block');
res.Text;
OpenCV:
kernel = numpy.ones((5,5),numpy.uint8)
blur = cv2.GaussianBlur(img,(5,5),0)
erosion = cv2.erode(blur,kernel,iterations = 1)
opening = cv2.morphologyEx(erosion, cv2.MORPH_OPEN, kernel)
#bremove = cv2.grabCut(opening,mask,rect,bgdModelmode==GC_INIT_WITH_MASK)
#th3 = cv2.adaptiveThreshold(opening,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU,11,2)
ret, thresh= cv2.threshold(opening,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
ocr = pytesseract.image_to_string(Image.open('image2.png'),config='stdout -c tessedit_char_whitelist=0123456789')
Here is the input image:
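As a side note (not from the answers below), the two Matlab calls the question asks about do have rough Python counterparts: imreconstruct corresponds to morphological reconstruction, which scikit-image provides, and imbinarize with the 'adaptive' option corresponds to adaptive thresholding in OpenCV. A minimal sketch, with the structuring-element size, block size and constant chosen as guesses rather than taken from the Matlab script:
import cv2
import numpy as np
from skimage.morphology import reconstruction

img = cv2.imread('image2.png', cv2.IMREAD_GRAYSCALE)

# rough counterpart of imerode + imreconstruct: erode to build a marker,
# then reconstruct by dilation underneath the original image
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (51, 1))  # size is a guess, not Matlab's strel('line',100,0)
marker = cv2.erode(img, kernel)
reconstructed = reconstruction(marker, img, method='dilation').astype(np.uint8)

# rough counterpart of imbinarize(..., 'adaptive', ...): local Gaussian-weighted
# mean threshold; blockSize and C loosely play the role of Matlab's Sensitivity
blurred = cv2.GaussianBlur(reconstructed, (3, 3), 1)
bw = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                           cv2.THRESH_BINARY, 35, 10)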
I am surprised at how much difference there is between Matlab and OpenCV when they both appear to use the same algorithm. Why do you run imbinarize twice? What does the sensitivity keyword actually do (mathematically, behind the scenes)? They obviously do several more steps than just bare Otsu.
import cv2
import numpy as np
import matplotlib.pyplot as plt
def show(img):
plt.imshow(img, cmap="gray")
plt.show()
img = cv2.imread("letters.jpg", cv2.IMREAD_GRAYSCALE)
kernel = np.ones((3,3), np.uint8)
blur = cv2.GaussianBlur(img,(3,3), 0)
erosion = cv2.erode(blur, kernel, iterations=3)
opening = cv2.dilate(erosion, kernel)
th3 = cv2.adaptiveThreshold(opening, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                            cv2.THRESH_BINARY, 45, 2)
show(th3)
kernel2 = cv2.getGaussianKernel(6, 2) #np.ones((6,6))
kernel2 = np.outer(kernel2, kernel2)
th3 = cv2.dilate(th3, kernel2)
th3 = cv2.erode(th3, kernel)
show(th3)
The images that get displayed are:
After a bit of cleaning up:
So, all in all, it is not the same and certainly not as nice as Matlab, but the basic principle seems the same; it's just that the numbers need playing with.
A better approach would probably be to threshold by the mean of the image and then use the output of that as a mask to adaptively threshold the original image. Hopefully the results would then be better than both OpenCV and Matlab.
Try doing it with ADAPTIVE_THRESH_MEAN_C; you can get some really nice results, but there is more trash lying around. Again, maybe if you can use it as a mask to isolate the text and then do thresholding again, it might turn out better. Also, the shape of the erosion and dilation kernels will make a big difference here.
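A minimal sketch of that mask-then-adaptive-threshold idea (my own interpretation of the suggestion above, not tested on the engraved-text image; the block size and constant are guesses):
import cv2
import numpy as np

img = cv2.imread('letters.jpg', cv2.IMREAD_GRAYSCALE)

# step 1: coarse foreground mask by thresholding at the global image mean
mean_val = img.mean()
ret, coarse_mask = cv2.threshold(img, mean_val, 255, cv2.THRESH_BINARY)

# step 2: adaptive threshold of the original image (mean of the local neighbourhood)
adaptive = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                 cv2.THRESH_BINARY, 45, 2)

# step 3: keep the adaptive result only where the coarse mask marks foreground
result = cv2.bitwise_and(adaptive, coarse_mask)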
I worked out the code to have a positive result based on your engraved text sample.
import cv2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def show(img):
    plt.imshow(img, cmap="gray")
    plt.show()
# load the input image
img = cv2.imread('./imagesStackoverflow/engraved_text.jpg',0);
show(img)
ret, mask = cv2.threshold(img, 60, 120, cv2.THRESH_BINARY) # turn 60, 120 for the best OCR results
kernel = np.ones((5,3),np.uint8)
mask = cv2.erode(mask,kernel,iterations = 1)
show(mask)
# I used a version of OpenCV with Tesseract; you may use pytesseract and set the modes as:
# OCR Engine Mode (OEM) = 3 (default = 3)
# Page Segmentation Mode (PSM) = 11 (default = 3)
tesser = cv2.text.OCRTesseract_create('C:/Program Files/Tesseract 4.0.0/tessdata/','eng','0123456789',11,3)
retval = tesser.run(mask, 0) # returns a string
print('OCR:' + retval)
Processed image and OCR output:
It would be great if you could report back your test results with more sample images.
What I can see from your code is that you used top-hat filtering as the first step in your Matlab code, but I couldn't see the same in your Python OpenCV code.
OpenCV has a built-in top-hat filter; try applying that to get a similar result:
kernel = np.ones((5,5),np.uint8)
tophat = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
Also, try using CLAHE, which gives your image better contrast, and then apply blackhat to filter out small details.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl1 = clahe.apply(img)
I have got better results by applying these transformations.
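For reference, combining the two suggestions might look like the rough sketch below; the clip limit, tile size and kernel size are illustrative guesses:
import cv2
import numpy as np

img = cv2.imread('letters.jpg', cv2.IMREAD_GRAYSCALE)

# boost local contrast with CLAHE
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
cl1 = clahe.apply(img)

# blackhat highlights small dark details (such as engraved strokes) against a brighter background
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))
blackhat = cv2.morphologyEx(cl1, cv2.MORPH_BLACKHAT, kernel)

# threshold the blackhat response to keep only the strongest details
ret, detail_mask = cv2.threshold(blackhat, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)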
I tried the code below; it works to recognize the lighter engraved text sample. Hope it helps.
def show(img):
    plt.imshow(img, cmap="gray")
    plt.show()
# load the input image
img = cv2.imread('./imagesStackoverflow/engraved_text2.jpg',0);
show(img)
# apply CLAHE to adjust the contrast
clahe = cv2.createCLAHE(clipLimit=5.1, tileGridSize=(5,3))
cl1 = clahe.apply(img)
img = cl1.copy()
show(img)
img = cv2.GaussianBlur(img,(3,3), 1)
ret, mask = cv2.threshold(img, 125, 150, cv2.THRESH_BINARY) # turn 125, 150 for the best OCR results
kernel = np.ones((5,3),np.uint8)
mask = cv2.erode(mask,kernel,iterations = 1)
show(mask)
# I used a version of OpenCV with Tesseract; you may use pytesseract and set the modes as:
# Page Segmentation Mode (PSM) = 11 (default = 3)
# OCR Engine Mode (OEM) = 3 (default = 3)
tesser = cv2.text.OCRTesseract_create('C:/Program Files/Tesseract 4.0.0/tessdata/','eng','0123456789',11,3)
retval = tesser.run(mask, 0) # returns a string
print('OCR:' + retval)
