How to turn a picture black and white - python

I am trying to convert some of my pictures to black and white. This is what I have so far:
import image

def black_and_white(pic):
    for y in range(pic.getHeight()):
        for x in range(pic.getWidth()):
            p = pic.getPixel(x,y)
            r = p.getRed()
            g = p.getGreen()
            b = p.getBlue()
            if x > 0.128:
                x = .255 * r + .255 * g + .255 * b
            else:
                x = .0 * r + .0 * g + .0 * b
            x = int(x) ## to convert it to an integer
            newp = image.Pixel(x, x, x) ## to convert to a new pixel
            pic.setPixel(x, y, newp)
    return pic

def main():
    bell = image.Image("luther.jpg")
    width = bell.getWidth()
    height = bell.getHeight()
    win = image.ImageWin(width, height)
    bell.draw(win)
    gs_bell = grayscale(bell)
    gs_bell.draw(win)

main() ## starts execution
If anyone could give me some advice, I would greatly appreciate it!
I am truly sorry for the lack of clarity. Here is the image I am getting.
[image: current output]

Here's a complete working example using PIL (you didn't say where your import image comes from, so I can't use that module).
import sys
import argparse
import PIL.Image  # https://pillow.readthedocs.io/en/3.1.x/reference/Image.html
import os

def black_and_white(pic):
    for y in range(pic.size[1]):
        for x in range(pic.size[0]):
            r, g, b = pic.getpixel((x, y))
            v = 0.2989 * r + 0.5870 * g + 0.1140 * b
            if v > 128.0:
                v = 255
            else:
                v = 0
            v = int(v)  ## to convert it to an integer
            pic.putpixel((x, y), (v, v, v))

def main(options):
    try:
        image = PIL.Image.open(options.filename)
    except:
        print('ERROR: Could not open %s' % (options.filename))
    else:
        black_and_white(image)
        basename = os.path.splitext(options.filename)[0]
        image.save(basename + '_solution' + '.jpg', 'JPEG')
    return 0

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'filename',
        help='Image file.')
    options = parser.parse_args()
    sys.exit(main(options))
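You can run it from the command line, e.g. (assuming you save the script as black_and_white.py, a name I'm making up here):

python black_and_white.py luther.jpg

It writes the result next to the input as luther_solution.jpg.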
This works for me:

Your code has numerous bugs. You're using x as a pixel value, when it is actually a pixel coordinate.
I would change this:
if x > 0.128:
    x = .255 * r + .255 * g + .255 * b
else:
    x = .0 * r + .0 * g + .0 * b
x = int(x) ## to convert it to an integer
newp = image.Pixel(x, x, x) ## to convert to a new pixel
to this:
v = 0.2989 * r + 0.5870 * g + 0.1140 * b
if v > 128.0:
    v = 255
else:
    v = 0
v = int(v) ## to convert it to an integer
newp = image.Pixel(v, v, v) ## to convert to a new pixel
The RGB weighting values are the standard luma coefficients (ITU-R BT.601).
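As a side note (not part of the original answer): if you stay with Pillow, the same threshold-to-black-and-white effect can be done without an explicit pixel loop, since convert('L') already applies those luma weights internally. A minimal sketch:

import PIL.Image

# Sketch: grayscale then threshold using Pillow built-ins.
# convert('L') applies the 0.299/0.587/0.114 weighting; point() maps each pixel value.
img = PIL.Image.open('luther.jpg')                       # file name from the question
bw = img.convert('L').point(lambda v: 255 if v > 128 else 0)
bw.save('luther_bw.jpg', 'JPEG')                         # output name is just an example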

Related

Converting a grayscale image (which was previously converted from a color image) back to a color image using PIL?

So, I have written code that hides data in a grayscale image and can retrieve it back from the grayscale image. I want to be able to do this for a color image.
At the moment, I'm thinking of converting the color image to grayscale, hiding the data, and then converting the image back to color, if that's possible.
Another idea: for grayscale, getpixel returns a single value, while for color getpixel returns a tuple, so I also thought of manipulating only one value of the tuple (if that's correct).
Edit: Here is the code, where I'm trying to get a value from the tuple of a color image. Also, sorry it's not documented at all.
from PIL import Image
import numpy as np
import glob
import os
from helper import tobits

image_list = []
for filename in glob.glob('*.png'):
    image_list.append(filename)
print(image_list)

#onlyforalphas
message = "he23#"*200
#print(message)
messagebi = ''.join(format(ord(x), '07b') for x in message)
#messagebi = tobits(message)
#print(messagebi)
payload = len(messagebi)
print(".........................PAYLOAD LENGTH : ", payload, "..................................................")

width = 512
height = 512
max_a = 0
min_b = 0
max_value = 0
min_value = 10000
z = 0
zi = 0
fileindex = 0

im = Image.open(image_list[0])
#print(im.histogram())

while payload > 0 and z < len(image_list):
    print(".........................PAYLOAD LENGTH : ", payload, "..........................................")
    print("OPENING FILE", image_list[z])
    im = Image.open(image_list[z])
    #print(im.histogram())
    z = z + 1
    hist_list = np.asarray(im.histogram())
    print(im.histogram())
    for i in range(len(hist_list)):
        if hist_list[i] >= max_value:
            max_value = hist_list[i]
            max_a = i
        if hist_list[i] <= min_value:
            min_value = hist_list[i]
            min_b = i
    if payload > max_value:
        print("ERROR:payload size: ", payload, " too large. Trying another image")

    hist_list = np.asarray(im.histogram())
    print(max_a, " ", max_value)
    print(min_b, " ", min_value)

    payload = payload - max_value
    if payload < 0:
        payload = 0

    hist_list[max_a] = 0
    #zi = 0
    messagelength = len(messagebi)
    #print(messagebi, " ", messagelength)
    for i in range(width):
        for j in range(height):
            temp = im.getpixel((i,j))[0]
            #print(temp)
            if temp > max_a and temp < min_b:
                im.putpixel((i,j), temp + 1)
            #print(im.getpixel((i,j)), end = "")
            if zi < messagelength and messagebi[zi] == '1' and temp == max_a:
                im.putpixel((i,j), max_a+1)
                zi = zi + 1
            elif zi < messagelength and messagebi[zi] == '0' and temp == max_a:
                zi = zi + 1
        #print("")
    #imnu = Image.fromarray(hist_list, mode='L')
    print("payload size after ", fileindex, "iteration is:", payload)
    filename = "output/filename" + str(fileindex) + ".png"
    im.save(filename)
    fileindex = fileindex + 1
    print(im.histogram())
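On the single-channel idea mentioned in the question: with Pillow you can read the RGB tuple and write back a tuple in which only one member has changed. A minimal sketch (the file names are hypothetical, and this is not wired into the hiding scheme above):

from PIL import Image

im = Image.open('cover.png').convert('RGB')    # hypothetical input file
r, g, b = im.getpixel((0, 0))                  # color getpixel returns a tuple
im.putpixel((0, 0), (r, g, min(b + 1, 255)))   # touch only the blue channel
im.save('cover_stego.png')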

Identify spoken language by creating spectrograms in python?

I'm practicing with the spoken language recognition code from 'https://github.com/YerevaNN/Spoken-language-identification'.
The input 'csv' dataset is downloaded from 'https://gist.github.com/Harhro94/aa11fe6b454c614cdedea882fd00f8d7'.
The first task is to convert the inputs into spectrograms. I tried this code, but it shows an error. You can use any audio file (WAV file) for this example.
Original code from GitHub: 'https://github.com/YerevaNN/Spoken-language-identification/blob/master/create_spectrograms.py'
import numpy as np
from matplotlib import pyplot as plt
import scipy.io.wavfile as wav
from numpy.lib import stride_tricks
import PIL.Image as Image
import os

def stft(sig, frameSize, overlapFac=0.5, window=np.hanning):
    win = window(frameSize)
    hopSize = int(frameSize - np.floor(overlapFac * frameSize))
    samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
    cols = np.ceil((len(samples) - frameSize) / float(hopSize)) + 1
    samples = np.append(samples, np.zeros(frameSize))
    frames = stride_tricks.as_strided(samples, shape=(cols, frameSize),
                                      strides=(samples.strides[0] * hopSize,
                                               samples.strides[0])).copy()
    frames *= win
    return np.fft.rfft(frames)

def logscale_spec(spec, sr=44100, factor=20, alpha=1.0, f0=0.9, fmax=1):
    spec = spec[:, 0:256]
    timebins, freqbins = np.shape(spec)
    scale = np.linspace(0, 1, freqbins)  # ** factor
    scale = np.array(map(lambda x: x * alpha
                         if x <= f0 else (fmax - alpha * f0) / (fmax - f0) *
                         (x - f0) + alpha * f0, scale))
    scale *= (freqbins - 1) / max(scale)
    newspec = np.complex128(np.zeros([timebins, freqbins]))
    allfreqs = np.abs(np.fft.fftfreq(freqbins * 2, 1. / sr)[:freqbins + 1])
    freqs = [0.0 for i in range(freqbins)]
    totw = [0.0 for i in range(freqbins)]
    for i in range(0, freqbins):
        if (i < 1 or i + 1 >= freqbins):
            newspec[:, i] += spec[:, i]
            freqs[i] += allfreqs[i]
            totw[i] += 1.0
            continue
        else:
            w_up = scale[i] - np.floor(scale[i])
            w_down = 1 - w_up
            j = int(np.floor(scale[i]))
            newspec[:, j] += w_down * spec[:, i]
            freqs[j] += w_down * allfreqs[i]
            totw[j] += w_down
            newspec[:, j + 1] += w_up * spec[:, i]
            freqs[j + 1] += w_up * allfreqs[i]
            totw[j + 1] += w_up
    for i in range(len(freqs)):
        if (totw[i] > 1e-6):
            freqs[i] /= totw[i]
    return newspec, freqs

def plotstft(audiopath, binsize=2 ** 10, plotpath=None, colormap="gray",
             channel=0, name='sampleaudio.png', alpha=1, offset=0):
    samplerate, samples = wav.read(audiopath)
    samples = samples[:, channel]
    s = stft(samples, binsize)
    sshow, freq = logscale_spec(s, factor=1, sr=samplerate, alpha=alpha)
    sshow = sshow[2:, :]
    ims = 20. * np.log10(np.abs(sshow) / 10e-6)
    timebins, freqbins = np.shape(ims)
    ims = np.transpose(ims)
    ims = ims[0:256, :]
    image = Image.fromarray(ims)
    image = image.convert('L')
    image.save(name)

file = open('trainingData.csv', 'r')
for iter, line in enumerate(file.readlines()[1:]):
    filepath = line.split(',')[0]
    filename = filepath[:-4]
    wavfile = 'sampleaudio.wav'
    os.system('mpg123 -w' + wavfile + '/C:/AnacondaProj/sampaudio.wav/' + filepath)
    plotstft(wavfile, channel=0, name='/C:/AnacondaProj/sampaudio.wav/' +
             filename + '.png', alpha=1)
    # os.remove(wavfile)
    print("processed %d files" % (iter + 1))
Thank you.
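As an aside (not part of the original post): if all you need is a grayscale spectrogram image per WAV file, a much shorter route is scipy.signal.spectrogram plus matplotlib. A minimal sketch, assuming a 16-bit WAV named sampleaudio.wav (a hypothetical file name):

import numpy as np
import scipy.io.wavfile as wav
from scipy.signal import spectrogram
import matplotlib.pyplot as plt

# Sketch: compute a spectrogram and save it as a grayscale PNG.
rate, samples = wav.read('sampleaudio.wav')     # hypothetical WAV file
if samples.ndim > 1:                            # keep one channel if stereo
    samples = samples[:, 0]
f, t, Sxx = spectrogram(samples, fs=rate, nperseg=1024, noverlap=512)
plt.imsave('sampleaudio.png', 10 * np.log10(Sxx + 1e-10), cmap='gray', origin='lower')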

simplecv | How to save generated frames to video

This is the function which generates a set of frames from an input video and stores them in a folder.
The full function:
def new_dewarp(self):
    vidpath = self.iVidPath

    def isInROI(x, y, R1, R2, Cx, Cy):
        isInOuter = False
        isInInner = False
        xv = x - Cx
        yv = y - Cy
        rt = (xv * xv) + (yv * yv)
        if (rt < R2 * R2):
            isInOuter = True
        if (rt < R1 * R1):
            isInInner = True
        return isInOuter and not isInInner

    def buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy):
        map_x = np.zeros((Hd, Wd), np.float32)
        map_y = np.zeros((Hd, Wd), np.float32)
        rMap = np.linspace(R1, R1 + (R2 - R1), Hd)
        thetaMap = np.linspace(0, 0 + float(Wd) * 2.0 * np.pi, Wd)
        sinMap = np.sin(thetaMap)
        cosMap = np.cos(thetaMap)
        for y in xrange(0, int(Hd - 1)):
            map_x[y] = Cx + rMap[y] * sinMap
            map_y[y] = Cy + rMap[y] * cosMap
        return map_x, map_y

    # do the unwarping
    def unwarp(img, xmap, ymap):
        output = cv2.remap(img.getNumpyCv2(), xmap, ymap, cv2.INTER_LINEAR)
        result = Image(output, cv2image=True)
        # return result
        return result

    #vidpath =
    disp = Display((800, 600))
    #disp = Display((1296,972))
    vals = []
    last = (0, 0)
    # Load the video from the rpi
    vc = VirtualCamera(vidpath, "video")
    # Sometimes there is crud at the begining, buffer it out
    for i in range(0, 10):
        img = vc.getImage()
        img.save(disp)
    # Show the user a frame let them left click the center
    # of the "donut" and the right inner and outer edge
    # in that order. Press esc to exit the display
    while not disp.isDone():
        test = disp.leftButtonDownPosition()
        if (test != last and test is not None):
            last = test
            print "[360fy]------- center = {0}\n".format(last)
            vals.append(test)
    # center of the "donut"
    Cx = vals[0][0]
    Cy = vals[0][1]
    #print str(Cx) + " " + str(Cy)
    # Inner donut radius
    R1x = vals[1][0]
    R1y = vals[1][1]
    R1 = R1x - Cx
    #print str(R1)
    # outer donut radius
    R2x = vals[2][0]
    R2y = vals[2][1]
    R2 = R2x - Cx
    #print str(R2)
    # our input and output image sizes
    Wd = round(float(max(R1, R2)) * 2.0 * np.pi)
    #Wd = 2.0*((R2+R1)/2)*np.pi
    #Hd = (2.0*((R2+R1)/2)*np.pi) * (90/360)
    Hd = (R2 - R1)
    Ws = img.width
    Hs = img.height
    # build the pixel map, this could be sped up
    print ("BUILDING MAP!")
    xmap, ymap = buildMap(Ws, Hs, Wd, Hd, R1, R2, Cx, Cy)
    print ("MAP DONE!")
    result = unwarp(img, xmap, ymap)
    result.save(disp)
    i = 0
    while img is not None:
        print "Frame Number: {0}".format(i)
        result = unwarp(img, xmap, ymap)
        result.save(disp)
        # Save to file
        fname = "vid_files/frames/FY{num:06d}.png".format(num=i)
        result.save(fname)
        img = vc.getImage()
        i = i + 1
    if img is None:
        self.statusText.setText(str("Status: Done"))
The section in the above code which does the saving of frames:
while img is not None:
    print "Frame Number: {0}".format(i)
    result = unwarp(img, xmap, ymap)
    result.save(disp)
    # Save to file
    fname = "vid_files/frames/FY{num:06d}.png".format(num=i)
    result.save(fname)
    img = vc.getImage()
    i = i + 1
if img is None:
    self.statusText.setText(str("Status: Done"))
I want to know if there is any way I can save the frames directly to a video (preferably MP4) with the frame rate and frame size of the input video.
I can fetch the frame size and frame rate using ffprobe if needed.
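One option (not from the original post, and assuming OpenCV 3+ so that cv2.VideoWriter_fourcc is available) is to feed the saved PNG frames into cv2.VideoWriter, using the frame rate and size obtained from ffprobe. A rough sketch with a hypothetical helper name:

import glob
import cv2

def frames_to_mp4(frame_paths, out_path, fps, size):
    # size is (width, height); 'mp4v' is a commonly available MP4 fourcc
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(out_path, fourcc, fps, size)
    for path in frame_paths:
        frame = cv2.imread(path)        # BGR array, the layout VideoWriter expects
        writer.write(frame)
    writer.release()

# Example call with made-up values for fps and size:
# frames_to_mp4(sorted(glob.glob('vid_files/frames/FY*.png')), 'vid_files/out.mp4', 30.0, (1024, 256))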

How to append float numbers to a list in python

I am trying to compare one image with all images in another folder, get the difference percentage, and print the file name with the least difference percentage. If I try to append the output differences to a list, I get an error saying "float values cannot be iterated". This is what I have done so far:
from itertools import izip
import os
import numpy as np
import cv2
from matplotlib import pyplot as plt
from PIL import Image
import math

res = 0

def take_and_save_picture(im_save):
    '''Take a picture and save it

    Args:
      im_save: filepath where the image should be stored
    '''
    camera_port = 0
    ramp_frames = 30
    cap = cv2.VideoCapture(camera_port)

    def get_image():
        retval, im = cap.read()
        return im

    for i in xrange(ramp_frames):
        temp = get_image()
    print("Taking image...")
    # Take the actual image we want to keep
    camera_capture = get_image()
    #im_save_tmp = im_save + '.jpg'
    im_save_tmp = im_save
    # A nice feature of the imwrite method is that it will automatically choose the
    # correct format based on the file extension you provide. Convenient!
    cv2.imwrite(im_save_tmp, camera_capture)
    # You'll want to release the camera, otherwise you won't be able to create a new
    # capture object until your script exits
    # del(cap)
    img1 = cv2.imread(im_save_tmp, 0)
    edges = cv2.Canny(img1, 100, 200)
    cv2.imwrite(im_save, edges)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def re(path1, path2):
    #path1 = raw_input("Enter the path1:")
    #path2 = raw_input("Enter the path2:")
    i2 = Image.open(path2)
    listing = os.listdir(path1)
    for file in listing:
        i1 = Image.open(path1 + file)
        assert i1.mode == i2.mode, "Different kinds of images."
        assert i1.size == i2.size, "Different sizes."
        pairs = izip(i1.getdata(), i2.getdata())
        if len(i1.getbands()) == 1:
            # for gray-scale jpegs
            dif = sum(abs(p1 - p2) for p1, p2 in pairs)
        else:
            dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))
        ncomponents = i1.size[0] * i1.size[1] * 3
        res = (dif / 255.0 * 100) / ncomponents
        print "Difference (percentage):", res

def main():
    capture_img = "/Users/Me/Documents/python programs/New/pro.png"
    #img_to_compare = "/Users/Me/Documents/python programs/compare/img2.jpg"
    take_and_save_picture(capture_img)
    path1 = "/Users/Me/Documents/python programs/New/numbers1/"
    path2 = "/Users/Me/Documents/python programs/New/pro.png"
    re(path1, path2)

if __name__ == '__main__':
    main()
The output is the difference values:
Difference (percentage): 2.52484809028
Difference (percentage): 2.64822048611
Difference (percentage): 2.64822048611
Difference (percentage): 3.55436197917
The values that I get in "res" have to be stored in a list, and the minimum value should be found and printed. Please give me some code; I'm totally new to Python. Thank you.
Your code should be like this:
#######
list_dif = []

def re(path1, path2):
    #path1 = raw_input("Enter the path1:")
    #path2 = raw_input("Enter the path2:")
    i2 = Image.open(path2)
    listing = os.listdir(path1)
    for file in listing:
        i1 = Image.open(path1 + file)
        assert i1.mode == i2.mode, "Different kinds of images."
        assert i1.size == i2.size, "Different sizes."
        pairs = izip(i1.getdata(), i2.getdata())
        if len(i1.getbands()) == 1:
            # for gray-scale jpegs
            dif = sum(abs(p1 - p2) for p1, p2 in pairs)
        else:
            dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))
        ncomponents = i1.size[0] * i1.size[1] * 3
        #######
        for n in range(ncomponents):
            res = (dif / 255.0 * 100) / (ncomponents + 1)
            list_dif.append(res)
        print "Difference (percentage):", list_dif
Something like this?
def re(path1, path2):
    #path1 = raw_input("Enter the path1:")
    #path2 = raw_input("Enter the path2:")
    i2 = Image.open(path2)
    listing = os.listdir(path1)
    res = []
    for file in listing:
        i1 = Image.open(path1 + file)
        assert i1.mode == i2.mode, "Different kinds of images."
        assert i1.size == i2.size, "Different sizes."
        pairs = izip(i1.getdata(), i2.getdata())
        if len(i1.getbands()) == 1:
            # for gray-scale jpegs
            dif = sum(abs(p1 - p2) for p1, p2 in pairs)
        else:
            dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))
        ncomponents = i1.size[0] * i1.size[1] * 3
        res.append((dif / 255.0 * 100) / ncomponents)
        print "Difference (percentage):", res
    minimum = min(res)  # Find minimum value in res
    print(minimum)
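If you also need the file name of the closest match (which the question asks for), one way, shown here as a standalone sketch with made-up values rather than the full comparison loop, is to store (difference, filename) pairs and let min() compare the tuples:

# Sketch with hypothetical values: pair each difference with its file name.
differences = [(2.52, 'img1.png'), (2.65, 'img2.png'), (3.55, 'img3.png')]
best_diff, best_file = min(differences)   # tuples compare element-wise
print("Closest match:", best_file, "difference:", best_diff)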

TypeError: argument 1 must be ImagingCore, not ImagingCore

Under Windows I get this error. How do I fix PIL?
This is the error: TypeError: argument 1 must be ImagingCore, not ImagingCore
#!/usr/bin/python
## -*- coding: utf-8 -*-
from PIL import Image, ImageFont
import ImageDraw, StringIO, string
from random import *
from math import *
import os

SITE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))

class Captcha3d(object):
    _hypot = 4
    _xx, _yy = 35, 70
    _CIMAGE = None
    _CHARS = string.ascii_lowercase + string.ascii_uppercase + string.digits
    _TEXT = ''

    def __init__(self):
        self._CIMAGE = Image.new("RGB", (self._yy * self._hypot, self._xx * self._hypot), (255,255,255))
        self.generateCode()
        self.render()

    def imageColorAllocate(self, r, g, b):
        hexchars = "0123456789ABCDEF"
        hexcolor = hexchars[r / 16] + hexchars[r % 16] + hexchars[g / 16] + hexchars[g % 16] + hexchars[b / 16] + hexchars[b % 16]
        return int(hexcolor, 16)

    def generateCode(self):
        chars = self._CHARS
        self._TEXT = "".join(choice(chars) for x in range(randint(3, 3)))

    def getText(self):
        return self._TEXT

    def getProection(self, x1, y1, z1):
        x = x1 * self._hypot
        y = z1 * self._hypot
        z = -y1 * self._hypot
        xx = 0.707106781187
        xy = 0
        xz = -0.707106781187
        yx = 0.408248290464
        yy = 0.816496580928
        yz = 0.408248290464  # 1/sqrt(6)
        cx = xx*x + xy*y + xz*z
        cy = yx*x + yy*y + yz*z + 20*self._hypot
        return [cx, cy]

    def zFunction(self, x, y):
        z = 2.6
        if self._CIMAGE.getpixel((y/2, x/2)) == (0,0,0):
            z = 0
        if z != 0:
            z += float(randint(0,60))/100
        z += 1.4 * sin((x+self.startX)*pi/15) * sin((y+self.startY)*pi/15)
        return z

    def render(self):
        fontSans = ImageFont.truetype(os.path.join(SITE_PATH, "data", "fonts", "FreeSans.ttf"), 14)
        draw = ImageDraw.Draw(self._CIMAGE)
        whiteColor = 'white'
        draw.rectangle([0, 0, self._yy * self._hypot, self._xx * self._hypot], fill=whiteColor)
        #textColor = 'black'
        #imgtext = Image.open("i8n.png")
        #self._CIMAGE.paste(imgtext, (0,0))
        imgtext = Image.new("1", (self._yy * self._hypot, self._xx * self._hypot), (1))
        drawtext = ImageDraw.Draw(imgtext)
        drawtext.text((1,0), self._TEXT, font=fontSans, fill=0)
        self._CIMAGE.paste(imgtext, (0,0))
        #draw.text((2,0), self.text, font=fontSans, fill=textColor)
        self.startX = randint(0, self._xx)
        self.startY = randint(0, self._yy)
        crd = {}
        x = 0
        while x < (self._xx+1):
            y = 0
            while y < (self._yy+1):
                crd[str(x) + '&' + str(y)] = self.getProection(x, y, self.zFunction(x, y))
                y += 1
            x += 1
        x = 0
        while x < self._xx:
            y = 0
            while y < self._yy:
                coord = []
                coord.append((int(crd[str(x) + '&' + str(y)][0]), int(crd[str(x) + '&' + str(y)][1])))
                coord.append((int(crd[str(x+1) + '&' + str(y)][0]), int(crd[str(x+1) + '&' + str(y)][1])))
                coord.append((int(crd[str(x+1) + '&' + str(y+1)][0]), int(crd[str(x+1) + '&' + str(y+1)][1])))
                coord.append((int(crd[str(x) + '&' + str(y+1)][0]), int(crd[str(x) + '&' + str(y+1)][1])))
                c = int(self.zFunction(x, y)*32)
                linesColor = (c, c, c)
                draw.polygon(coord, fill=whiteColor, outline=linesColor)
                #draw.polygon(coord, fill=whiteColor)
                y += 1
            x += 1
        draw.rectangle([0, 0, self._xx, self._yy], fill=whiteColor)
        #draw.text((2,0), self.text, font=fontSans, fill=textColor)
        #imageString($this->image, 1, 3, 0, (microtime(true)-$this->time), $textColor);
        del draw
        #self._CIMAGE.save("image.png", "PNG")
        return [self._CIMAGE, self._TEXT]

def main():
    a = Captcha3d()
    print a.getText()

if __name__ == '__main__':
    main()
Also happens for me on OSX 10.6.8, Python 2.6.5. I think some class is getting dynamically imported twice.
Try changing
from PIL import Image, ImageFont
to
import Image, ImageFont
That worked for me.
In my case, the problem was solved by also importing ImageDraw from PIL:
from PIL import ImageDraw
Importing everything directly from PIL should always work.
However, if you mix imports, like this,
from PIL import Image
import ImageDraw
this can lead to a conflict between two non-identical PIL libraries.
This can happen if you have installed both PIL and Pillow.
We should really always do,
from PIL import Image
from PIL import ImageDraw
etc.
I.e. be specific about which package to use.
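A quick diagnostic (not from the original answers, just a sketch) is to print where each module is actually loaded from; if the two paths point into different installations, PIL and Pillow are being mixed:

# Diagnostic sketch: check which installation each module comes from.
from PIL import Image
from PIL import ImageDraw
print(Image.__file__)
print(ImageDraw.__file__)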
