How can I make two windows show at the same time? - python

I am trying to make a game with pygame and face recognition. In order to do that I need two windows open: one for the face recognition and one for the game. But when I import face_recog.py, the game window won't show until the face recognition window is closed. What should I do?
I tried importing it inside the while loop of game.py, but that didn't help.
game.py:
import pygame
import face_recog
from background import *

FRAME = 0

class Game:
    def __init__(self):
        self.width = 900
        self.height = 600
        self.screen = pygame.display.set_mode((self.width, self.height))
        self.clock = pygame.time.Clock()
        self.fire_rect = [530, 40]

    def main(self):
        global FRAME
        # create sprite groups
        self.all_sprites = pygame.sprite.Group()
        self.platforms = pygame.sprite.Group()
        self.player_group = pygame.sprite.Group()
        pygame.init()
        # add sprites to the groups
        self.player1 = Player((self.width / 2, self.height / 2), self)
        self.all_sprites.add(self.player1)
        self.player_group.add(self.player1)
        # load the background walls
        for plat in PlatformList:
            p = Platform(*plat)
            self.all_sprites.add(p)
            self.platforms.add(p)
        # initialization
        trap1 = trap(self)
        background_ = background(self.width, self.height)
        item_ = item(self)
        self.shot_ = shot(self.screen, self)
        item_.item_display(self.screen)  # items can disappear, so keep this outside the while loop
        while True:
            # settings
            time = self.clock.tick(60)
            FRAME += 1
            self.screen.fill((255, 193, 158))
            # draw the background
            background_.background(self.screen)
            # item_.item_display(self.screen)
            item_.item_eat(self.screen)
            trap1.trap_draw(self.screen, self.fire_rect)
            self.shot_.shooting()
            self.event()
            self.all_sprites.update()
            self.all_sprites.draw(self.screen)
            pygame.display.flip()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    exit(0)
face_recog.py:
import sys
import os
import dlib
import glob
from skimage import io
import numpy as np
import cv2
from scipy.spatial import distance as dist  # to compute the distance between the lips
import math
import pygame
from game import *

cap = cv2.VideoCapture(0)  # video capture input
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (1280, 720))

predictor_path = 'shape_predictor_81_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)  # landmark predictor

(mStart, mEnd) = (48, 54)  # start and end landmark indices of the mouth
MOUTH_AR_THRESH = 0.1

while cap.isOpened():
    ret, frame = cap.read()  # read a frame
    frame = cv2.flip(frame, 1)
    dets = detector(frame, 0)  # rects
    for k, d in enumerate(dets):
        shape = predictor(frame, d)
        landmarks = np.matrix([[p.x, p.y] for p in shape.parts()])
        for num in range(shape.num_parts):
            cv2.circle(frame, (shape.parts()[num].x, shape.parts()[num].y), 3, (0, 255, 0), -1)
        A = dist.euclidean((shape.parts()[61].x, shape.parts()[61].y), (shape.parts()[67].x, shape.parts()[67].y))
        B = dist.euclidean((shape.parts()[63].x, shape.parts()[63].y), (shape.parts()[65].x, shape.parts()[65].y))
        C = dist.euclidean((shape.parts()[48].x, shape.parts()[48].y), (shape.parts()[54].x, shape.parts()[54].y))
        mar = (A + B) / (2.0 * C)
        mar = round(mar, 5)
        if mar > MOUTH_AR_THRESH:
            cv2.putText(frame, "MOUTH IS OPEN!", (30, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.imshow('frame', frame)  # window title
    out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # wait for a key press; quit on 'q'
        print("q pressed")
        break
cap.release()
out.release()
cv2.destroyAllWindows()

Your program runs in a single thread. The first while loop keeps that thread busy, so it can never service the second loop (and its window) at the same time.
The solution is to separate them into two threads: keep one loop (for example the game loop) in the main thread, and start the other one in a new thread.
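A minimal sketch of that split, assuming the capture loop at the bottom of face_recog.py is wrapped in a function (run() below is a placeholder name you would add yourself) and the circular import is removed so that neither file starts a loop at import time:

import threading

import face_recog        # must no longer do `from game import *` or run its loop at import time
from game import Game

# Run the OpenCV/dlib recognition loop in a background thread.
# face_recog.run is a hypothetical wrapper around the existing `while cap.isOpened():` loop.
recog_thread = threading.Thread(target=face_recog.run, daemon=True)
recog_thread.start()

# Keep the pygame loop in the main thread; GUI event loops are best left there.
Game().main()

Depending on the OS, cv2.imshow can also be picky about running outside the main thread; if the OpenCV window misbehaves, try swapping which loop gets the background thread, or run the two parts as separate processes instead.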

Related

Python Screen Recorder nothing happens

I want to use ready-made code to capture the game. But when I run it, nothing happens and there are no errors. What could be the problem?
import numpy as np
from PIL import ImageGrab
import cv2
import time

def screen_record():
    last_time = time.time()
    while True:
        # 800x600 windowed mode for GTA 5, at the top left position of your main screen.
        # 40 px accounts for title bar.
        printscreen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
        print('loop took {} seconds'.format(time.time() - last_time))
        last_time = time.time()
        cv2.imshow('window', cv2.cvtColor(printscreen, cv2.COLOR_BGR2RGB))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

screen_record()

How to fix OpenCV video freezing for a few seconds when a sound plays?

I am trying to create a motion detector application that plays an alarm when motion is detected. The program runs, but when the sound plays the video freezes for a few seconds. How can I fix that?
I'm tired of looking for references and couldn't find anything.
import cv2
import numpy as np
import os
from playsound import playsound

# video capture
capture = cv2.VideoCapture(1)
fgbg = cv2.createBackgroundSubtractorMOG2(50, 200, True)

# frame counter
frameCount = 0
while True:
    # read a frame
    ret, frame = capture.read()
    # check the frame
    if not ret:
        break
    frameCount += 1
    # resize the frame
    resizedFrame = cv2.resize(frame, (0, 0), fx=0.50, fy=0.50)
    # foreground mask
    fgmask = fgbg.apply(resizedFrame)
    # pixel count
    count = np.count_nonzero(fgmask)
    if frameCount > 1 and count > 5000:
        playsound('tune.mp3')
    os.system('cls' if os.name == 'nt' else 'clear')
    print('Frame: %d, Pixel Count: %d' % (frameCount, count))
    cv2.imshow('Frame', resizedFrame)
    cv2.imshow('Mask', fgmask)
    k = cv2.waitKey(1)
    if k == 27:
        break
capture.release()
cv2.destroyAllWindows()
Unfortunately, OpenCV's window freezes when you play a sound with playsound. Even if you call playsound from a thread, it will still freeze.
However, you can use Pygame's mixer, which doesn't interrupt OpenCV's window. You can play a sound like this:
from pygame import mixer
mixer.init()
sound = mixer.Sound('mysound.ogg')
sound.play()
However, I think Pygame's mixer is incompatible with mp3 files. You might wanna use .wav instead, or convert your mp3 file to .ogg format.
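Applied to the motion detector above, a minimal sketch could look like this (assuming the alarm has been converted to a file named tune.ogg, and using pygame.mixer.get_busy() so the sound isn't restarted on every frame while it is still playing):

import cv2
import numpy as np
from pygame import mixer

mixer.init()
alarm = mixer.Sound('tune.ogg')   # hypothetical name for the converted alarm file

capture = cv2.VideoCapture(1)
fgbg = cv2.createBackgroundSubtractorMOG2(50, 200, True)
frameCount = 0

while True:
    ret, frame = capture.read()
    if not ret:
        break
    frameCount += 1
    resizedFrame = cv2.resize(frame, (0, 0), fx=0.50, fy=0.50)
    fgmask = fgbg.apply(resizedFrame)
    count = np.count_nonzero(fgmask)
    if frameCount > 1 and count > 5000 and not mixer.get_busy():
        alarm.play()              # returns immediately, so the imshow windows keep refreshing
    cv2.imshow('Frame', resizedFrame)
    cv2.imshow('Mask', fgmask)
    if cv2.waitKey(1) == 27:      # Esc quits, as in the original
        break

capture.release()
cv2.destroyAllWindows()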

uninterrupted while loop with python cv2.VideoCapture() and string input

I use this code to capture and display the input from my webcam.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
while True:
    check, frame = cap.read()
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
I have a barcode scanner and want to check inside the while loop whether a specific string has been scanned.
input() interrupts the stream from the webcam. I need something like cv2.waitKey(), but for strings:
while True:
    check, frame = cap.read()
    cv2.imshow('frame', frame)
    if cv2.waitString(barcode) == '123456':
        # do something
    if cv2.waitString(barcode) == '098765':
        # do something else
I tried msvcrt, but to no avail. The stream continues but nothing gets printed.
if msvcrt.kbhit():
    if msvcrt.getwche() == '280602017300':
        print("Barcode scanned!")
Or is there a way to skip input() until something has been entered?
UPDATE
Making some progress with the help of this post.
How to read keyboard-input?
I was able to update my code.
import threading
import queue
import time
import numpy as np
import cv2

def read_kbd_input(inputQueue):
    print('Ready for keyboard input:')
    while True:
        input_str = input()
        inputQueue.put(input_str)

def main():
    inputQueue = queue.Queue()
    inputThread = threading.Thread(target=read_kbd_input, args=(inputQueue,), daemon=True)
    inputThread.start()
    cap = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_DUPLEX
    fontScale = 1
    fontColor = (255, 255, 255)
    lineType = 2
    input_str = "test"
    while True:
        check, frame = cap.read()
        if inputQueue.qsize() > 0:
            input_str = inputQueue.get()
            if input_str == '280602017300':
                print("do something")
        cv2.putText(frame, input_str, (10, 30), font, fontScale, fontColor, lineType)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        time.sleep(0.01)
    print("End.")

if __name__ == '__main__':
    main()
Now the only problem left is that my webcam stream is supposed to run in fullscreen, so the console will always be in the background and therefore won't get the input from the keyboard or my barcode scanner.
I need it the other way around.

Reducing Lag in Pi Camera

I found code that can scan a barcode using the Raspberry Pi camera V2.1.
It works as expected and can detect a barcode when I present it to the camera. But if I move the camera around a little, there is a lag in the video. I tried increasing camera.framerate but that doesn't do anything, and neither does changing the resolution. Even if I remove the dec() function, the video still looks laggy.
How can I improve the camera framerate so it does not lag?
Also, the code opens a window where I can see the video. For now it is useful for debugging, but I was wondering how I could stop the Pi from opening the video window later on?
from ftplib import FTP
from pyzbar.pyzbar import decode
import os, sys, cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
import imutils, time

detectedBarcode = False

def dec(frame):
    global detectedBarcode
    x = decode(frame)
    for i in x:
        detectedBarcode = True
        (x, y, w, h) = i.rect
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        barcodeData = i.data.decode("utf-8")
        barcodeType = i.type
        print(barcodeData, type(barcodeData))
        #sys.exit()
        return (barcodeData, barcodeType, 1)
    return ('', '', 0)

def cameraReader():
    fourcc = cv2.VideoWriter_fourcc(*'X264')
    camera = PiCamera()
    camera.resolution = (1296, 730)
    camera.framerate = 30
    rawCapture = PiRGBArray(camera)
    cv2.namedWindow("QR Scanner", cv2.WINDOW_NORMAL)
    global detectedBarcode
    avg = None
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=False):
        image = frame.array
        cv2.line(image, (650, 0), (650, 1000), (0, 255, 0), 2)
        x, y, p = dec(image)
        cv2.imshow("QR Scanner", image)
        if cv2.waitKey(2) & 0xFF == ord('q'):
            break
        rawCapture.truncate(0)
    #cap.release()
    cv2.destroyAllWindows()

cameraReader()

How to use webcam as a screen of pygame?

I would like to blit images from a webcam onto a pygame screen.
I'm using OS X.
I know pygame's camera module currently supports only Linux and v4l2 cameras, so how can we use a webcam with pygame?
import cv2
import pygame
import numpy as np

pygame.init()
pygame.display.set_caption("OpenCV camera stream on Pygame")
surface = pygame.display.set_mode([1280, 720])

# 0 is the built-in camera
cap = cv2.VideoCapture(0)
# get the fps of your camera
fps = cap.get(cv2.CAP_PROP_FPS)
print("fps:", fps)
# if your camera can achieve 60 fps;
# else just have this be 1-30 fps
cap.set(cv2.CAP_PROP_FPS, 60)

running = True
while running:
    surface.fill([0, 0, 0])
    success, frame = cap.read()
    if not success:
        break
    # for some reason the frames appear inverted
    frame = np.fliplr(frame)
    frame = np.rot90(frame)
    # the video uses BGR colors and pygame needs RGB
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    surf = pygame.surfarray.make_surface(frame)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:       # allow the window to be closed
            running = False
        if event.type == pygame.KEYUP:
            surface.fill((255, 0, 0))       # flash the background red on a key release
    # show the pygame surface!
    surface.blit(surf, (0, 0))
    pygame.display.flip()

cap.release()
pygame.quit()
