PyGame saving webcam image, the image is split - python

I am using pygame to capture an image, and I can't seem to get it right. Attached is the image; as you can see, it is split down the middle.
Here is the source code:
import pygame
import pygame.camera

def getImg(fname):
    pygame.camera.init()
    cam_list = pygame.camera.list_cameras()
    cam = pygame.camera.Camera(cam_list[0])
    cam.start()
    img = cam.get_image()
    pygame.image.save(img, fname)
    cam.stop()

OK, so I found a workaround that I am posting, since it may be useful in the future:
def saveImage(fname):
    pygame.camera.init()
    cam_list = pygame.camera.list_cameras()
    cam = pygame.camera.Camera(cam_list[0])
    cam.start()
    # The duplicate get_image() calls:
    # the first ones get the camera working,
    # the last one captures a clean image.
    cam.get_image()
    cam.get_image()
    img = cam.get_image()
    pygame.image.save(img, fname)
    cam.stop()
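An alternative to hard-coding warm-up calls (a sketch of my own, not part of the original post) is to poll query_image(), which pygame's camera API exposes to report when a frame is ready, and discard frames for a short settling period before saving:

import time
import pygame
import pygame.camera

def save_image_when_ready(fname, warmup_seconds=1.0):
    pygame.camera.init()
    cam = pygame.camera.Camera(pygame.camera.list_cameras()[0])
    cam.start()
    # Discard frames for a short warm-up period so the camera can settle.
    deadline = time.time() + warmup_seconds
    while time.time() < deadline:
        if cam.query_image():  # True when a frame is ready to fetch
            cam.get_image()    # fetch it and throw it away
    img = cam.get_image()      # this frame should be complete and clean
    pygame.image.save(img, fname)
    cam.stop()

The warmup_seconds value is an arbitrary choice; the idea is the same as the duplicate get_image() trick above, just without guessing how many frames to skip.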

Related

How to append an image to a video using OpenCV or FFMPEG or Moviepy or other libraries?

Do you know of a Python library that can append a frame image to an existing video? The resulting video must keep the same quality as the image.
I tried using OpenCV to append this Google image: https://www.google.com/search?q=google&sxsrf=ALiCzsZhrdoHnOTmg0We4dxtguCqzma5Jg:1657603343101&source=lnms&tbm=isch&sa=X&ved=2ahUKEwiTh8bTzfL4AhWhplYBHfXNAKwQ_AUoAXoECAIQAw&biw=1492&bih=739&dpr=1.25#imgrc=PRtenhDnrVrfOM
However, the quality decreases as the video gets longer.
Here is the final result video: https://drive.google.com/file/d/1ArDvoX-kN9H_oLbACk3kU1Cid93SMczC/view?usp=sharing
Here is my code using OpenCV:
import cv2

image = cv2.imread(path_image)
height, width, dimensions = image.shape
video = cv2.VideoCapture(path_video)

# Read every frame of the existing video into memory.
frames = []
while True:
    ret, frame = video.read()
    if ret:
        frames.append(frame)
        # frame = cv2.resize(frame, (width, height), fx=0, fy=0, interpolation=cv2.INTER_CUBIC)
        # Press S on the keyboard to stop the process
        if cv2.waitKey(1) & 0xFF == ord('s'):
            break
    else:
        # No more frames: break the loop
        break

# Re-encode the frames and append the image at the end.
video2 = cv2.VideoWriter(path_video, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30, (width, height))
for frame in frames:
    video2.write(frame)
video2.write(image)
video2.release()  # releasing the generated video
print("Added {}".format(path_image))
I would like to improve the quality of this video.
I guess a simple way to achieve that using moviepy would be the following:
from moviepy.editor import ImageClip, VideoFileClip, concatenate_videoclips
from PIL import Image
import numpy as np

maxsize = (target_width, target_height)
jpg_image = Image.open(path_to_the_image)
# Modify the image's resolution to be the target one
jpg_image.thumbnail(maxsize, Image.LANCZOS)  # Image.ANTIALIAS in older Pillow versions
jpg_image = np.array(jpg_image)

image = ImageClip(jpg_image).set_duration(target_duration)
clip = VideoFileClip(path_to_the_video)

video = concatenate_videoclips([image, clip], method="compose")
video.write_videofile("output_example.mp4", fps=target_fps)
As long as you set the target resolution of the image to match the video's, that is all you need.
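If it helps, the target values do not have to be hard-coded; a small sketch (the variable names are mine, not from the answer) can read the resolution and frame rate off the source clip first, so the image is guaranteed to match:

from moviepy.editor import VideoFileClip

clip = VideoFileClip(path_to_the_video)
target_width, target_height = clip.size  # (width, height) of the source video
target_fps = clip.fps                    # reuse the source frame rate
target_duration = 3                      # seconds to show the still image (arbitrary choice)

Reusing the source frame rate when calling write_videofile also avoids an extra resampling step.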

How to reliably detect a barcode's 4 corners in real time video capture

I found a Stack Overflow answer for detecting a barcode in an image. I am trying to apply the method from that answer to real-time video capture, because my current solution only detects barcodes on a clean, large surface. How can I apply the method to video capture? Here is my code.
import cv2
import numpy as np
from pyzbar.pyzbar import decode

# List of authorized barcode payloads (populate with your own values)
myDataList = []

cap = cv2.VideoCapture(0)
cap.set(3, 640)  # frame width
cap.set(4, 480)  # frame height

while True:
    success, img = cap.read()
    for barcode in decode(img):
        myData = barcode.data.decode('utf-8')
        print(myData)
        if myData in myDataList:
            myOutput = 'Authorized'
            myColor = (0, 255, 0)
        else:
            myOutput = 'Un-Authorized'
            myColor = (0, 0, 255)
        # Draw the barcode outline and its decoded text
        pts = np.array([barcode.polygon], np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv2.polylines(img, [pts], True, myColor, 5)
        pts2 = barcode.rect
        cv2.putText(img, myData, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SIMPLEX,
                    0.9, myColor, 2)
    cv2.imshow('Result', img)
    cv2.waitKey(1)
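Regarding the four corners specifically: pyzbar already returns them per detection in barcode.polygon as Point(x, y) tuples, so a minimal per-frame helper (the function name is my own choice, not from the question) could collect them like this:

from pyzbar.pyzbar import decode

def barcode_corners(frame):
    """Return a list of 4-point corner lists, one per barcode found in the frame."""
    corners = []
    for barcode in decode(frame):
        # polygon is a list of Point(x, y); a clean detection has 4 entries
        pts = [(p.x, p.y) for p in barcode.polygon]
        if len(pts) == 4:
            corners.append(pts)
    return corners

Running decode on a grayscale copy of each frame sometimes helps stability, but that part is trial and error with your camera and lighting.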

My Screen Recorder built using OpenCV and PyAutoGUI records only one frame

I am building a screen recorder in Python using numpy, OpenCV and PyAutoGUI, with tkinter for the GUI. The problem with my screen recorder is that it records only one frame when I click the Record Screen button, then the window freezes and I can't do anything. Here is my code so far:
from tkinter import *
import cv2
import numpy as np
import pyautogui

resolution = (1366, 768)

# Specify video codec
codec = cv2.VideoWriter_fourcc(*"XVID")

# Specify name of the output file
filename = "Recordings.avi"

# Specify frame rate (we can choose any value and experiment with it)
fps = 30.0

# Create a VideoWriter object
out = cv2.VideoWriter(filename, codec, fps, resolution)

def startRecording():
    window.iconify()
    while True:
        img = pyautogui.screenshot()
        # Convert the screenshot to a numpy array
        frame = np.array(img)
        # Swap the channel order (pyautogui returns RGB, OpenCV expects BGR)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Write it to the output file
        out.write(frame)

def stopRecording():
    cv2.destroyAllWindows()
    out.release()
    window.destroy()

window = Tk()
window.title("Screen Recorder")
window.geometry("400x150")
window.config(bg='pink')

recordButton = Button(window, text="Record(F9)", font=("Bell MT", 20), width=20, command=startRecording)
recordButton.pack(pady=(10, 0))

stopButton = Button(window, text="Stop(F10)", font=("Bell MT", 20), width=20, command=stopRecording)
stopButton.pack(pady=(10, 0))

mainloop()
You cannot do a blocking call in a button callback.
As you wrote it, startRecording will never return and will therefore block tkinter's mainloop. The recording probably works, but your UI becomes unresponsive.
Your best bet is to schedule the recording (look at the after method): record one frame every x milliseconds.
Here is a simplified example based on your original code (you need to complete it):
continueRecording = True  # must be declared before stopRecording

window = Tk()  # must be declared before recordOneFrame

def stopRecording():
    global continueRecording
    continueRecording = False

def recordOneFrame():
    global continueRecording
    img = pyautogui.screenshot()
    # Convert the screenshot to a numpy array
    frame = np.array(img)
    # Swap the channel order (pyautogui returns RGB, OpenCV expects BGR)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Write it to the output file
    out.write(frame)
    if continueRecording:
        # Schedule the next frame roughly 1/25 s from now
        window.after(round(1/25. * 1000), recordOneFrame)

def startRecording():
    recordOneFrame()
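To complete the example, the remaining wiring could look roughly like this. It is a sketch under assumptions of my own: a 1366x768 screen, 25 fps (40 ms between frames), and the codec, filename and button layout taken from the question.

from tkinter import Tk, Button
import cv2
import numpy as np
import pyautogui

resolution = (1366, 768)  # assumed to match the actual screen size
out = cv2.VideoWriter("Recordings.avi",
                      cv2.VideoWriter_fourcc(*"XVID"),
                      25.0, resolution)

continueRecording = True  # flag flipped by the Stop button

window = Tk()
window.title("Screen Recorder")
window.geometry("400x150")

def recordOneFrame():
    # Grab one screenshot, convert it for OpenCV and append it to the file.
    frame = np.array(pyautogui.screenshot())
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    out.write(frame)
    if continueRecording:
        # Re-schedule instead of looping, so tkinter stays responsive.
        window.after(40, recordOneFrame)  # 40 ms ~ 25 fps

def startRecording():
    window.iconify()
    recordOneFrame()

def stopRecording():
    global continueRecording
    continueRecording = False
    out.release()
    window.destroy()

Button(window, text="Record(F9)", font=("Bell MT", 20), width=20,
       command=startRecording).pack(pady=(10, 0))
Button(window, text="Stop(F10)", font=("Bell MT", 20), width=20,
       command=stopRecording).pack(pady=(10, 0))

window.mainloop()

Note that the VideoWriter's resolution has to match the size of the frames handed to write(), otherwise OpenCV may silently drop them, and releasing the writer in stopRecording makes sure the AVI file is finalized before the window goes away.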

Completely bright camera image with python/opencv (even with no light)

I want to use opencv to obtain images from a USB camera (DMK 23UP1300). Unfortunately the images are almost completely bright: the intensity values are all between 250 and 255, even if there is no light (tested with Python 2.7 and 3.7). Does anybody know what the problem could be? I have already updated the camera drivers, with no effect. I even set the exposure time down to the minimal possible value, which had no impact. Please see the attached code as a minimal example:
import cv2

class Camera:
    def __init__(self):
        self.initialize_value = 0

    def GetImage(self):
        if self.initialize_value == 0:
            # Open and configure the camera on first use
            self.cap = cv2.VideoCapture(0)
            self.cap.set(3, 1280)  # frame width
            self.cap.set(4, 1024)  # frame height
            self.cap.set(cv2.CAP_PROP_EXPOSURE, -13)  # change exposure
            self.initialize_value = 1
        ret, im_orig = self.cap.read()
        # cv2.imshow("Livestream_Window", im_orig)
        self.im_main = cv2.cvtColor(im_orig, cv2.COLOR_BGR2GRAY)
        print(self.im_main)
        cv2.imshow("Livestream_Window", self.im_main)
        cv2.waitKey(1)

if __name__ == "__main__":
    cam = Camera()  # initialize camera
    ct = 0
    while 1:
        print(ct)
        ct = ct + 1
        cam.GetImage()  # Options: Camera, Image, Video
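For what it's worth, on many UVC cameras the manual exposure value is ignored until auto-exposure is switched off first. Here is a hedged sketch of that; the property values are backend-dependent guesses (0.25 is commonly "manual" on V4L2, 1 on some DirectShow drivers), so treat them as assumptions to experiment with:

import cv2

cap = cv2.VideoCapture(0)
# Try to disable auto-exposure before setting a manual exposure value.
# The meaning of the number varies per backend/driver, so these are guesses:
cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)  # 0.25 = manual mode on many V4L2 cameras
# cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1)   # some DirectShow drivers use 1 = manual, 3 = auto
cap.set(cv2.CAP_PROP_EXPOSURE, -13)
print("exposure now reported as:", cap.get(cv2.CAP_PROP_EXPOSURE))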

my print("Found faces", str(len(faces))) not executed

I'm trying to detect multiple faces using opencv in Python, on Raspbian OS (Raspberry Pi 3). The code is mostly working: it detects a face, draws a rectangular boundary around it, and successfully saves the image to my local folder. The problem is that the statement print("Found faces", str(len(faces))) doesn't seem to work and the console remains blank. What am I missing here, or where am I going wrong?
import io
import picamera
import cv2
import numpy

# Capture a single JPEG frame from the Pi camera into an in-memory stream
stream = io.BytesIO()
with picamera.PiCamera() as camera:
    camera.resolution = (320, 240)
    camera.hflip = True
    camera.capture(stream, format='jpeg')

# Decode the JPEG bytes into an OpenCV image
buff = numpy.fromstring(stream.getvalue(), dtype=numpy.uint8)  # numpy.frombuffer in newer numpy
image = cv2.imdecode(buff, 1)

# Detect faces with a Haar cascade
face_cascade = cv2.CascadeClassifier('face1.xml')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.1, 5)

print("Found faces", str(len(faces)))

for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (255, 255, 0), 2)

cv2.imwrite('result.jpg', image)
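One thing sometimes worth checking when the saved file appears but console output does not (a guess, not a confirmed diagnosis) is stdout buffering, especially when the script is launched from an IDE or as a background process; forcing a flush rules that out:

import sys

print("Found faces", str(len(faces)))
sys.stdout.flush()  # push any buffered output out immediately
# or, on Python 3: print("Found faces", len(faces), flush=True)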
