Clean black points on an image for OCR - python

I want to clean up stray black points on an image with Python, because I need to apply OCR processing to the image file.
I converted the image to monochrome, and I get this image:
http://s23.postimg.org/bulq1dmt3/ba210.png
So, I want to delete the isolated black points:
def temizleHips2(x, y, w, h, listei):
    # Check the 8 neighbours of (x, y); nfind / koordinatparse are helpers defined elsewhere in the code.
    koordinat = list(nfind(x, y))
    x = int(x)
    y = int(y)
    w = int(w)
    h = int(h)
    i = 0
    a = 0
    m = 4
    b = 0
    for i in xrange(8):
        b = 0
        k = koordinat[i]
        x2, y2 = koordinatparse(k)
        if x2 >= 0 and y2 >= 0 and x2 < w and y2 < h:
            if listei[x2, y2] == 0:
                a = a + 1
    # Keep the pixel black only if more than two neighbours are black, otherwise make it white.
    if a > 2:
        return 0
    else:
        return 255
def ultratemizle(dosya):
    # 290a.tif
    image_file = dosya
    img = Image.open(image_file)
    # img = img.convert('1')
    # img.save("209i.tif", "TIFF")
    datas = list(img.getdata())
    newData = list()
    temizlemes = list()
    temizlemeson = list()
    siyah = 0
    beyaz = 0
    # count black (siyah) and white (beyaz) pixels
    for each in datas:
        if each == 255:
            beyaz = beyaz + 1
        else:
            siyah = siyah + 1
    # if the image is mostly black, invert it
    if siyah > beyaz:
        for each in datas:
            if each == 255:
                each = 0
            elif each == 0:
                each = 255
            newData.append(each)
        img.putdata(newData)
    x1, y1 = 0, 0
    tmp_isim = "a" + dosya
    img.save("b" + tmp_isim, "TIFF")
    img = Image.open(tmp_isim)
    imgmat = img.load()
    x, y = img.size
    x1 = 0
    y1 = 0
    deger = 0
    temizlemes = []
    for x1 in range(0, x):
        for y1 in range(0, y):
            if imgmat[x1, y1] == 0:
                deger = temizleHips2(x1, y1, x, y, imgmat)
                temizlemes.append(deger)
                if deger != imgmat[x1, y1]:
                    print "noktalar : " + str(x1) + "," + str(y1) + " ilk : " + str(imgmat[x1, y1]) + " son: " + str(deger)
            else:
                temizlemes.append(imgmat[x1, y1])
    img.putdata(temizlemes)
    img.show()
    img.save(tmp_isim, "TIFF")
    tem = img.load()
I get this image:
http://s16.postimg.org/wc97bdzdt/a356.png
However, I still want to clean the leftover black pixels around the "s" in the second picture.
I can't find where the problem is.

You should try to find the big contours and remove the other (smaller) contours.
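For example, here is a minimal sketch of that idea with OpenCV; the input file name and the area threshold are assumptions you would adapt to your image:
import cv2

# Hypothetical file name; use the monochrome image you feed to OCR.
img = cv2.imread("ba210.png", cv2.IMREAD_GRAYSCALE)

# Invert so the glyphs become white foreground on a black background.
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)

# OpenCV 4.x returns (contours, hierarchy); adjust the unpacking for 3.x.
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

MIN_AREA = 20  # assumed noise threshold in pixels; tune it for your scans
for cnt in contours:
    if cv2.contourArea(cnt) < MIN_AREA:
        # Erase small specks by filling them with the background colour.
        cv2.drawContours(binary, [cnt], -1, 0, thickness=cv2.FILLED)

cleaned = cv2.bitwise_not(binary)  # back to black text on white paper
cv2.imwrite("cleaned.png", cleaned)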

Related

PIL - animated GIF trim outermost black part of image and turn it into transparent

I'm trying to convert some transparent PNGs into one animated GIF, but there is a trimming issue. Except for the first image, every other image's outermost region containing only black is cropped and becomes transparent.
e.g. the red part of the PNG will be cut away in the generated GIF:
[example picture of the trimming issue in the animated GIF]
Below is my code. Sorry if it is a bit messy, as I am still learning Python.
from PIL import Image
import glob

# https://stackoverflow.com/questions/46850318/transparent-background-in-gif-using-python-imageio
def gen_frame(path):
    im = Image.open(path)
    alpha = im.getchannel('A')
    # Convert the image into P mode but only use 255 colors in the palette out of 256
    im = im.convert('RGB').convert('P', palette=Image.Palette.ADAPTIVE, colors=255)
    # Set fully transparent pixels (alpha == 0) to 255, and the rest to 0
    mask = Image.eval(alpha, lambda a: 255 if a <= 0 else 0)
    # Paste palette index 255 wherever the mask is set
    im.paste(255, mask)
    # The transparency index is 255
    im.info['transparency'] = 255
    return im

def resize4Twitter(img):
    TWITTER_MAX_WIDTH, TWITTER_MAX_HEIGHT = 1280, 1080
    if img.width < TWITTER_MAX_WIDTH and img.height < TWITTER_MAX_HEIGHT:
        return img
    elif img.width / img.height > TWITTER_MAX_WIDTH / TWITTER_MAX_HEIGHT:
        x, y = TWITTER_MAX_WIDTH, (img.height / img.width * TWITTER_MAX_WIDTH)
    else:
        x, y = (img.width / img.height * TWITTER_MAX_HEIGHT), TWITTER_MAX_HEIGHT
    return img.resize((int(x), int(y)))

### User Input
imagePath, gifName, fpsStr, forTwitter = '', '', '', ''
fps = 0
imagePath = input("Enter PNG path:")
gifName = input("Enter GIF name:")
while fps == 0:
    fpsStr = input("Enter FPS [1-50]:")
    if fpsStr.isdigit():
        if int(fpsStr) >= 1 and int(fpsStr) <= 50:
            fps = int(fpsStr)
        else:
            print("Invalid. Please enter an integer from 1 to 50.")
    else:
        print("Invalid. Please enter an integer from 1 to 50.")
while forTwitter != "Y" and forTwitter != "N":
    forTwitter = input("Resize for Twitter? [Y/N]: ")

### filepaths
fp_in = imagePath + "\\" + gifName + "_*.png"
details = "_fps" + str(fps)
if forTwitter == "Y":
    details = details + "_twitterSize"
fp_out = imagePath + "\\" + gifName + details + ".gif"

### Process Images
# https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#gif
# https://legacy.imagemagick.org/script/command-line-options.php?#dispose
imgpaths = sorted(glob.glob(fp_in))
imgs = []
for imgpath in imgpaths:
    img = gen_frame(imgpath)
    if forTwitter == "Y":
        img = resize4Twitter(img)
    imgs.append(img)
    print("Image loaded:\t" + imgpath)
imgs = iter(imgs)  # I tried .show() here, the PNGs are still normal
dur = 1000 / fps
img = next(imgs)  # extract first image from iterator
img.save(fp_out, save_all=True, append_images=imgs,
         optimize=False, duration=dur, loop=0, disposal=2)  # use disposal to clear prev. frame
print("Animated GIF produced at: " + fp_out)

How can I convert a colorful pfm image to a png one?

So I found code on GitHub that can convert PFM grayscale images to PNG. The problem with this code is that it doesn't work on colour images.
Here is a sample image that I want to convert to PNG: https://drive.google.com/file/d/16yP987otoCyOX-ail22Aru2AltnSa1hR/view?usp=sharing (it's a link because I can't upload PFM images :)
The link to the original code : https://github.com/MahdiGhiasi/DepthEstimationProject/blob/master/pfm2png/pfm2png_depth.py
from PIL import Image
import numpy as np
import colorsys
import sys
import re
import os

def readPFM(file):
    file = open(file, 'rb')
    color = None
    width = None
    height = None
    scale = None
    endian = None
    header = file.readline().rstrip().decode('utf-8')
    if header == 'PF':
        color = True
    elif header == 'Pf':
        color = False
    else:
        print(header)
        raise Exception('Not a PFM file.')
    dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
    if dim_match:
        width, height = map(int, dim_match.groups())
    else:
        raise Exception('Malformed PFM header.')
    scale = float(file.readline().rstrip().decode('utf-8'))
    if scale < 0:  # little-endian
        endian = '<'
        scale = -scale
    else:
        endian = '>'  # big-endian
    data = np.fromfile(file, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    data = np.flipud(data)
    return data, scale, color

def writePFM(file, image, scale=1):
    file = open(file, 'wb')
    color = None
    if image.dtype.name != 'float32':
        raise Exception('Image dtype must be float32.')
    image = np.flipud(image)
    if len(image.shape) == 3 and image.shape[2] == 3:  # color image
        color = True
    elif len(image.shape) == 2 or (len(image.shape) == 3 and image.shape[2] == 1):  # greyscale
        color = False
    else:
        raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
    file.write('PF\n' if color else 'Pf\n')
    file.write('%d %d\n' % (image.shape[1], image.shape[0]))
    endian = image.dtype.byteorder
    if endian == '<' or (endian == '=' and sys.byteorder == 'little'):
        scale = -scale
    file.write('%f\n' % scale)
    image.tofile(file)

def save_depth_image(result, image_path):
    height = len(result)
    width = len(result[0])
    maxVal = np.max(result)
    #print("width:", width)
    #print("height:", height)
    output = []
    for i in range(height):
        row = []
        for j in range(width):
            h = 240 * result[i][j] / maxVal
            (r, g, b) = colorsys.hsv_to_rgb(h / 360, 1, 1)
            #color = (int)(255 * result[j][i] / maxVal)
            row.append([int(r * 255), int(g * 255), int(b * 255), 255])
        output.append(row)
    img = Image.fromarray(np.asarray(output).astype(np.uint8), mode='RGBA')
    img.save(image_path, "PNG")

def normalize(data):
    maxVal = np.max(data)
    print("maxVal:", maxVal)
    data = data / maxVal
    data = 1 - data
    data *= 90
    return data

file_name = input("pfm file?\n")
data, scale, color = readPFM(file_name)
data = normalize(data)
if color:
    print("Not a depth file")
else:
    save_depth_image(data, 'pfm2png_depth.png')
    os.startfile('pfm2png_depth.png')
No need to roll your own code for this:
import cv2

img = cv2.imread("img.pfm", cv2.IMREAD_UNCHANGED)  # it's float32 !
img /= 255  # back to [0..255]
cv2.imwrite("img.png", img)

Converting a grayscale image (which was previously converted from a color image) back to a color image using PIL?

So, I have written code that hides data in a grayscale image and can retrieve it back from the grayscale image. I want to be able to do this for a color image.
At the moment, I'm thinking of converting a color image to grayscale, hiding the data, and then converting the image back to color, if that's possible.
Another thing I'm considering is that, for grayscale, getpixel returns a single value, while for color getpixel returns a tuple, so I also thought of manipulating only one value of the tuple (if that is correct); see the sketch below.
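For reference, a minimal sketch of that one-value idea with PIL (the file names are placeholders; it only touches the red value of the pixel tuple):
from PIL import Image

im = Image.open("cover.png").convert("RGB")  # placeholder file name

r, g, b = im.getpixel((0, 0))   # colour images return a tuple per pixel
r = (r & ~1) | 1                # e.g. hide one bit in the red channel's least significant bit
im.putpixel((0, 0), (r, g, b))  # write the modified tuple back

im.save("stego.png")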
Edit: here is the code where I'm trying to get a value from the pixel tuple of a color image. Also, sorry it's not documented at all.
from PIL import Image
import numpy as np
import glob
import os
from helper import tobits

image_list = []
for filename in glob.glob('*.png'):
    image_list.append(filename)
print(image_list)

#onlyforalphas
message = "he23#"*200
#print(message)
messagebi = ''.join(format(ord(x), '07b') for x in message)
#messagebi = tobits(message)
#print(messagebi)
payload = len(messagebi)
print(".........................PAYLOAD LENGTH : ", payload, "..................................................")

width = 512
height = 512
max_a = 0
min_b = 0
max_value = 0
min_value = 10000
z = 0
zi = 0
fileindex = 0
im = Image.open(image_list[0])
#print(im.histogram())
while payload > 0 and z < len(image_list):
    print(".........................PAYLOAD LENGTH : ", payload, "..........................................")
    print("OPENING FILE", image_list[z])
    im = Image.open(image_list[z])
    #print(im.histogram())
    z = z + 1
    hist_list = np.asarray(im.histogram())
    print(im.histogram())
    for i in range(len(hist_list)):
        if hist_list[i] >= max_value:
            max_value = hist_list[i]
            max_a = i
        if hist_list[i] <= min_value:
            min_value = hist_list[i]
            min_b = i
    if payload > max_value:
        print("ERROR:payload size: ", payload, " too large. Trying another image")
    hist_list = np.asarray(im.histogram())
    print(max_a, " ", max_value)
    print(min_b, " ", min_value)
    payload = payload - max_value
    if payload < 0:
        payload = 0
    hist_list[max_a] = 0
    #zi = 0
    messagelength = len(messagebi)
    #print(messagebi, " ", messagelength)
    for i in range(width):
        for j in range(height):
            temp = im.getpixel((i, j))[0]
            #print(temp)
            if temp > max_a and temp < min_b:
                im.putpixel((i, j), temp + 1)
                #print(im.getpixel((i,j)), end = "")
            if zi < messagelength and messagebi[zi] == '1' and temp == max_a:
                im.putpixel((i, j), max_a + 1)
                zi = zi + 1
            elif zi < messagelength and messagebi[zi] == '0' and temp == max_a:
                zi = zi + 1
        #print("")
    #imnu = Image.fromarray(hist_list, mode='L')
    print("payload size after ", fileindex, "iteration is:", payload)
    filename = "output/filename" + str(fileindex) + ".png"
    im.save(filename)
    fileindex = fileindex + 1
    print(im.histogram())

Error "'NoneType' object has no attribute '__getitem__'": what is wrong?

When I run this code:
import numpy as np
import cv2
from sklearn.datasets import fetch_mldata
from skimage.measure import label, regionprops
from sklearn.neighbors import KNeighborsClassifier

def train(data, target):
    knn = KNeighborsClassifier(n_neighbors=1)
    knn.fit(data, target)
    return knn

def move(image, x, y):
    img = np.zeros((28, 28))
    img[:(28-x), :(28-y)] = image[x:, y:]
    return img

def fill(image):
    if np.shape(image) != (28, 28):
        img = np.zeros((28, 28))
        x = 28 - np.shape(image)[0]
        y = 28 - np.shape(image)[1]
        img[:-x, :-y] = image
        return img
    else:
        return image

def my_rgb2gray(img_rgb):
    img_gray = 0.5*img_rgb[:, :, 0] + 0*img_rgb[:, :, 1] + 0.5*img_rgb[:, :, 2]
    img_gray = img_gray.astype('uint8')
    return img_gray

def my_rgb2gray2(img_rgb):
    frame = img_rgb
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    ret, frame_bw = cv2.threshold(grey, 170, 255, 0)
    frame_bw = cv2.morphologyEx(frame_bw, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))
    return frame_bw

def count_images(framecal):
    regions = label(framecal)
    labels = regionprops(regions)
    images = []
    for i in range(0, len(labels)):
        if labels[i].centroid[0] < result[0] and labels[i].centroid[1] < result[1]:
            images.append(labels[i].image)
    count = 0
    for img in images:
        obrada = fill(np.array(img.astype('uint8')))
        count += model.predict(obrada.reshape(1, -1))
    return count

def check2(indices, i):
    check = False
    for el in indices:
        if (el == i):
            check = True
            break
    return check

def findPoints(lines):
    Xmin = 1000
    Ymin = 1000
    Ymax = 0
    Xmax = 0
    for i in range(len(lines)):
        for x1, y1, x2, y2 in lines[i]:
            if x1 < Xmin:
                Xmin = x1
                Ymin = y1
            if x2 > Xmax:
                Ymax = y2
                Xmax = x2
    return Xmin, Ymin, Xmax, Ymax

def hough(frame, gray, min_line_len, max_line_gap):
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    cv2.imwrite('line.png', frame)
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 40, min_line_len, max_line_gap)
    minx, miny, maxx, maxy = findPoints(lines)
    cv2.line(frame, (minx, miny), (maxx, maxy), (233, 0, 0), 2)
    return minx, miny, maxx, maxy

homepath = 'SoftVideoData/'
videopaths = ['video-0.avi',
              'video-1.avi',
              'video-2.avi',
              'video-3.avi',
              'video-4.avi',
              'video-5.avi',
              'video-6.avi',
              'video-7.avi',
              'video-8.avi',
              'video-9.avi']

mnist = fetch_mldata('MNIST original')
data = mnist.data > 0
data = data.astype('uint8')
target = mnist.target
fixed = np.empty_like(data)
for i in range(0, len(data)):
    l = label(data[i].reshape(28, 28))
    r = regionprops(l)
    min_x = r[0].bbox[0]
    min_y = r[0].bbox[1]
    for j in range(1, len(r)):
        if r[j].bbox[0] < min_x:
            min_x = r[j].bbox[0]
        if r[j].bbox[1] < min_y:
            min_y = r[j].bbox[1]
    img = move(data[i].reshape(28, 28), min_x, min_y)
    fixed[i] = img.reshape(784, )

model = train(fixed, target)

for index in range(0, 9):
    total = 0
    video = cv2.VideoCapture(homepath + videopaths[index])
    flag, frame = video.read()
    bw = my_rgb2gray(frame)
    result = hough(frame, bw, 10, 50)
    while 1:
        flag1, frame1 = video.read()
        last_count = total
        if flag1 is True:
            bwframe = my_rgb2gray2(frame1)
            curr_count = count_images(bwframe)
            if curr_count <= last_count:
                last_count = curr_count
            else:
                total += curr_count - last_count
                last_count = curr_count
            print total
            k = cv2.waitKey(15) & 0xff
            if k == 27:
                break
        else:
            break
    with open('out.txt', 'a') as file:
        file.write(homepath + videopaths[index] + '\t' + str(total))
I get this error:
Traceback (most recent call last):
  File "C:\Users\Joe\Desktop\SOFT-master7o\SoftProject.py", line 147, in <module>
    bw = my_rgb2gray(frame)
  File "C:\Users\Joe\Desktop\SOFT-master7o\SoftProject.py", line 35, in my_rgb2gray
    img_gray = 0.5*img_rgb[:, :, 0] + 0*img_rgb[:, :, 1] + 0.5*img_rgb[:, :, 2]
TypeError: 'NoneType' object has no attribute '__getitem__'
What's wrong? Thanks
When you index a name with square brackets, Python calls '__getitem__' under the hood.
So it means that img_rgb is not what you expect it to be: instead of a numpy array, it is None.
Check the portion of code where img_rgb is assigned.
Answer to comment:
Check your inputs before you do operations on them.
I would use the VS built-in debugger, set a breakpoint on this line:
bw = my_rgb2gray(frame)
and inspect each frame to see whether it is None before entering the function.
How to handle it if it's None? It depends: either skip that video frame or, if all frames are None, something is amiss and you need to check why flag, frame = video.read() produces a frame that is None. Sometimes the documentation will help you out:
https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture
https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-read
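For example, a minimal defensive version of that read loop could look like this (the stopping policy is an assumption; you may prefer to skip a bad frame instead):
import cv2

video = cv2.VideoCapture("SoftVideoData/video-0.avi")  # path taken from the question

while True:
    flag, frame = video.read()
    if not flag or frame is None:
        # End of stream or a decode failure: stop instead of passing None along.
        break
    # frame is now guaranteed to be a valid numpy array,
    # so a call like my_rgb2gray(frame) will not see None.

video.release()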

simplecv | How to save generated frames to video

This is the function which generates a set of frames from an input video and stores them in a folder.
The full function:
def new_dewarp(self):
    vidpath = self.iVidPath

    def isInROI(x, y, R1, R2, Cx, Cy):
        isInOuter = False
        isInInner = False
        xv = x - Cx
        yv = y - Cy
        rt = (xv*xv) + (yv*yv)
        if(rt < R2*R2):
            isInOuter = True
            if(rt < R1*R1):
                isInInner = True
        return isInOuter and not isInInner

    def buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy):
        map_x = np.zeros((Hd, Wd), np.float32)
        map_y = np.zeros((Hd, Wd), np.float32)
        rMap = np.linspace(R1, R1 + (R2 - R1), Hd)
        thetaMap = np.linspace(0, 0 + float(Wd) * 2.0 * np.pi, Wd)
        sinMap = np.sin(thetaMap)
        cosMap = np.cos(thetaMap)
        for y in xrange(0, int(Hd-1)):
            map_x[y] = Cx + rMap[y] * sinMap
            map_y[y] = Cy + rMap[y] * cosMap
        return map_x, map_y

    # do the unwarping
    def unwarp(img, xmap, ymap):
        output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
        result = Image(output, cv2image=True)
        # return result
        return result

    #vidpath =
    disp = Display((800, 600))
    #disp = Display((1296,972))
    vals = []
    last = (0, 0)
    # Load the video from the rpi
    vc = VirtualCamera(vidpath, "video")
    # Sometimes there is crud at the begining, buffer it out
    for i in range(0, 10):
        img = vc.getImage()
        img.save(disp)
    # Show the user a frame let them left click the center
    # of the "donut" and the right inner and outer edge
    # in that order. Press esc to exit the display
    while not disp.isDone():
        test = disp.leftButtonDownPosition()
        if(test != last and test is not None):
            last = test
            print "[360fy]------- center = {0}\n".format(last)
            vals.append(test)
    # center of the "donut"
    Cx = vals[0][0]
    Cy = vals[0][1]
    #print str(Cx) + " " + str(Cy)
    # Inner donut radius
    R1x = vals[1][0]
    R1y = vals[1][1]
    R1 = R1x - Cx
    #print str(R1)
    # outer donut radius
    R2x = vals[2][0]
    R2y = vals[2][1]
    R2 = R2x - Cx
    #print str(R2)
    # our input and output image sizes
    Wd = round(float(max(R1, R2)) * 2.0 * np.pi)
    #Wd = 2.0*((R2+R1)/2)*np.pi
    #Hd = (2.0*((R2+R1)/2)*np.pi) * (90/360)
    Hd = (R2 - R1)
    Ws = img.width
    Hs = img.height
    # build the pixel map, this could be sped up
    print("BUILDING MAP!")
    xmap, ymap = buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy)
    print("MAP DONE!")
    result = unwarp(img, xmap, ymap)
    result.save(disp)
    i = 0
    while img is not None:
        print "Frame Number: {0}".format(i)
        result = unwarp(img, xmap, ymap)
        result.save(disp)
        # Save to file
        fname = "vid_files/frames/FY{num:06d}.png".format(num=i)
        result.save(fname)
        img = vc.getImage()
        i = i + 1
    if img is None:
        self.statusText.setText(str("Status: Done"))
This is the section of the above code that does the saving of frames:
while img is not None:
    print "Frame Number: {0}".format(i)
    result = unwarp(img, xmap, ymap)
    result.save(disp)
    # Save to file
    fname = "vid_files/frames/FY{num:06d}.png".format(num=i)
    result.save(fname)
    img = vc.getImage()
    i = i + 1
if img is None:
    self.statusText.setText(str("Status: Done"))
I want to know if there is any way I can save the frames directly to a video (preferably MP4) with the frame rate and frame size of the input video.
I can fetch the frame size and frame rate using ffprobe if needed.
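One possible approach is to skip the intermediate PNGs and write each processed frame straight to a video with OpenCV's cv2.VideoWriter. This is only a sketch with plain OpenCV (not SimpleCV); the file names are placeholders, the codec tag depends on your OpenCV build, and fps/size are read from the input capture rather than ffprobe:
import cv2

cap = cv2.VideoCapture("input.avi")  # hypothetical input path
fps = cap.get(cv2.CAP_PROP_FPS)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # commonly available MP4 codec tag
out = cv2.VideoWriter("output.mp4", fourcc, fps, (width, height))

while True:
    ok, frame = cap.read()
    if not ok:
        break
    # ... unwarp/process the frame here, keeping the (width, height) size ...
    out.write(frame)

cap.release()
out.release()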
