Python Screen Recorder nothing happens - python

I want to use ready-made code to capture the game. But when I run it, nothing happens and there are no errors. What could be the problem?
import numpy as np
from PIL import ImageGrab
import cv2
import time

def screen_record():
    last_time = time.time()
    while(True):
        # 800x600 windowed mode for GTA 5, at the top left position of your main screen.
        # 40 px accounts for title bar.
        printscreen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
        print('loop took {} seconds'.format(time.time() - last_time))
        last_time = time.time()
        cv2.imshow('window', cv2.cvtColor(printscreen, cv2.COLOR_BGR2RGB))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

screen_record()
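A quick way to narrow this down (my debugging suggestion, not part of the original question) is to test a single grab outside the loop: if the saved image is black or empty, the problem is the capture itself (for example the game running in fullscreen/exclusive mode where ImageGrab cannot see it); if the image looks fine, the problem is the display window.

import numpy as np
from PIL import ImageGrab
import cv2

# Grab one frame and inspect it before entering any loop.
frame = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
print(frame.shape, frame.dtype, frame.mean())   # a mean near 0 suggests a black capture
cv2.imwrite('debug_grab.png', cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
cv2.imshow('debug', cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
cv2.waitKey(0)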

Related

uninterrupted while loop with python cv2.VideoCapture() and string input

I use this code to capture and display the input from my webcam.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
while(True):
    check, frame = cap.read()
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
I have a barcode scanner and want to check inside the while loop if a specific string gets scanned in.
input() interrupts the stream from the webcam. I need something like cv2.waitKey() but for strings:
while(True):
    check, frame = cap.read()
    cv2.imshow('frame', frame)
    if cv2.waitString(barcode) == '123456':
        # do something
    if cv2.waitString(barcode) == '098765':
        # do something else
I tried msvcrt, but to no avail. The stream continues but nothing gets printed.
if msvcrt.kbhit():
    if msvcrt.getwche() == '280602017300':
        print("Barcode scanned!")
Or is there a way to skip input() until something has been entered?
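A likely reason the msvcrt attempt prints nothing is that msvcrt.getwche() returns one character per call, so it can never equal the full barcode string. A minimal sketch (mine, Windows-only, assuming the scanner types the digits followed by Enter) that buffers characters inside the existing loop without blocking it:

import msvcrt

barcode_buffer = ''
while True:
    check, frame = cap.read()
    cv2.imshow('frame', frame)
    # Drain any pending keystrokes without blocking the video loop.
    while msvcrt.kbhit():
        ch = msvcrt.getwche()
        if ch in ('\r', '\n'):               # the scanner finished one code
            if barcode_buffer == '280602017300':
                print("Barcode scanned!")
            barcode_buffer = ''
        else:
            barcode_buffer += ch
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

This still requires the console window to have keyboard focus, which is exactly the limitation the update below runs into.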
UPDATE
Making some progress with the help of this post.
How to read keyboard-input?
I was able to update my code.
import threading
import queue
import time
import numpy as np
import cv2

def read_kbd_input(inputQueue):
    print('Ready for keyboard input:')
    while (True):
        input_str = input()
        inputQueue.put(input_str)

def main():
    inputQueue = queue.Queue()
    inputThread = threading.Thread(target=read_kbd_input, args=(inputQueue,), daemon=True)
    inputThread.start()
    cap = cv2.VideoCapture(0)
    font = cv2.FONT_HERSHEY_DUPLEX
    fontScale = 1
    fontColor = (255, 255, 255)
    lineType = 2
    input_str = "test"
    while (True):
        check, frame = cap.read()
        if (inputQueue.qsize() > 0):
            input_str = inputQueue.get()
            if (input_str == '280602017300'):
                print("do something")
        cv2.putText(frame, input_str, (10, 30), font, fontScale, fontColor, lineType)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        time.sleep(0.01)
    print("End.")

if (__name__ == '__main__'):
    main()
Now the only problem left is that my webcam stream is supposed to run in fullscreen, so the console will always be in the background and therefore won't get the input from the keyboard or my barcode scanner. I need it the other way around.
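One option (my suggestion, not from the original post) is to replace the console input() thread with a global keyboard hook, so the scanner's keystrokes are captured no matter which window has focus. A minimal sketch using pynput, assuming the scanner behaves like a keyboard and sends Enter after each code (global hooks may need accessibility or admin permissions on some platforms):

import queue
from pynput import keyboard

inputQueue = queue.Queue()
chars = []

def on_press(key):
    # Collect printable characters; push the whole code on Enter.
    if key == keyboard.Key.enter:
        inputQueue.put(''.join(chars))
        chars.clear()
    elif hasattr(key, 'char') and key.char is not None:
        chars.append(key.char)

listener = keyboard.Listener(on_press=on_press)
listener.start()   # runs in its own thread; call listener.stop() when done

The video loop above can then poll inputQueue exactly as it already does, without the console ever needing focus.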

What is the most efficient way to capture screen in python using modules eg PIL or cv2? because It takes up a lot of ram

What is the most efficient way to capture the screen in Python using modules such as PIL or cv2? Because it takes up a lot of RAM.
I wanted to teach an AI to play Chrome's dino game through screen scraping and NEAT, but it is way too slow...
I have tried:
import numpy as np
from PIL import ImageGrab
import cv2
import time

last_time = time.time()
while True:
    printscreen_pil = ImageGrab.grab(bbox=(0, 40, 800, 640))
    printscreen_numpy = np.array(printscreen_pil.getdata(), dtype='uint8').reshape((printscreen_pil.size[1], printscreen_pil.size[0], 3))
    print(f'the loop took {time.time() - last_time} seconds')
    last_time = time.time()
    cv2.imshow('window', printscreen_numpy)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break

# average time = the loop took 2.068769931793213 seconds
You can use mss, which is "an ultra fast cross-platform multiple screenshots module in pure Python".
For example:
import time
import cv2
import numpy as np
from mss import mss

mon = {'top': 200, 'left': 200, 'width': 200, 'height': 200}

with mss() as sct:
    while True:
        last_time = time.time()
        img = sct.grab(mon)
        print('The loop took: {0}'.format(time.time() - last_time))
        cv2.imshow('test', np.array(img))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
Result:
The loop took: 0.024120092391967773
This is roughly 100x faster than your current result.
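One more RAM-related note (mine, not from the original answer): sct.grab() returns BGRA pixels, so for something like the dino-game agent it usually pays to convert each frame to grayscale and downscale it, which cuts memory per frame considerably. A small sketch:

import numpy as np
import cv2
from mss import mss

mon = {'top': 200, 'left': 200, 'width': 200, 'height': 200}
with mss() as sct:
    frame = np.array(sct.grab(mon))                    # H x W x 4, BGRA, uint8
    gray = cv2.cvtColor(frame, cv2.COLOR_BGRA2GRAY)    # H x W, a quarter of the memory
    small = cv2.resize(gray, (80, 60))                 # downscale further before feeding a model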

How can I make two windows show at the same time?

I am trying to make a game with pygame and face recognition. In order to do that I need two windows open: one for the face recognition and one for the game. But when I import face_recog.py, the game window won't show until face_recog is closed. What should I do?
I tried importing it inside the while loop of the game.py file.
game.py code:
import pygame
import face_recog
from background import *

FRAME = 0

class Game:
    def __init__(self):
        self.width = 900
        self.height = 600
        self.screen = pygame.display.set_mode((self.width, self.height))
        self.clock = pygame.time.Clock()
        self.fire_rect = [530, 40]

    def main(self):
        global FRAME
        # create sprite groups
        self.all_sprites = pygame.sprite.Group()
        self.platforms = pygame.sprite.Group()
        self.player_group = pygame.sprite.Group()
        pygame.init()
        # add sprites to the groups
        self.player1 = Player((self.width/2, self.height/2), self)
        self.all_sprites.add(self.player1)
        self.player_group.add(self.player1)
        # load the background walls
        for plat in PlatformList:
            p = Platform(*plat)
            self.all_sprites.add(p)
            self.platforms.add(p)
        # initialization
        trap1 = trap(self)
        background_ = background(self.width, self.height)
        item_ = item(self)
        self.shot_ = shot(self.screen, self)
        item_.item_display(self.screen)  # items can disappear, so this stays outside the while loop
        while True:
            # settings
            time = self.clock.tick(60)
            FRAME += 1
            self.screen.fill((255, 193, 158))
            # draw the background
            background_.background(self.screen)
            # item_.item_display(self.screen)
            item_.item_eat(self.screen)
            trap1.trap_draw(self.screen, self.fire_rect)
            self.shot_.shooting()
            self.event()
            self.all_sprites.update()
            self.all_sprites.draw(self.screen)
            pygame.display.flip()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    exit(0)
face_recog.py code:
import sys
import os
import dlib
import glob
from skimage import io
import numpy as np
import cv2
from scipy.spatial import distance as dist  # to compute the distance between the lips
import math
import pygame
from game import *

cap = cv2.VideoCapture(0)  # video input
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (1280, 720))
predictor_path = 'shape_predictor_81_face_landmarks.dat'
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)  # landmark predictor
(mStart, mEnd) = (48, 54)  # start and end indices of the mouth
MOUTH_AR_THRESH = 0.1

while(cap.isOpened()):
    ret, frame = cap.read()  # read a frame
    frame = cv2.flip(frame, 1)
    dets = detector(frame, 0)  # rects
    for k, d in enumerate(dets):
        shape = predictor(frame, d)
        landmarks = np.matrix([[p.x, p.y] for p in shape.parts()])
        for num in range(shape.num_parts):
            cv2.circle(frame, (shape.parts()[num].x, shape.parts()[num].y), 3, (0, 255, 0), -1)
        A = dist.euclidean((shape.parts()[61].x, shape.parts()[61].y), (shape.parts()[67].x, shape.parts()[67].y))
        B = dist.euclidean((shape.parts()[63].x, shape.parts()[63].y), (shape.parts()[65].x, shape.parts()[65].y))
        C = dist.euclidean((shape.parts()[48].x, shape.parts()[48].y), (shape.parts()[54].x, shape.parts()[54].y))
        mar = (A + B) / (2.0 * C)
        mar = round(mar, 5)
        if mar > MOUTH_AR_THRESH:
            cv2.putText(frame, "MOUTH IS OPEN!", (30, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    cv2.imshow('frame', frame)  # window title
    out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # wait for a key press; quit on 'q'
        print("q pressed")
        break

cap.release()
out.release()
cv2.destroyAllWindows()
Your program runs inside a single thread. The first while loop keeps that thread busy, so it cannot handle the second loop (window) at the same time.
The solution is to separate them into two different threads: one loop runs in the main thread and the other runs in a new thread.
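A minimal sketch of that idea (mine, with assumptions): because face_recog.py runs its while loop at import time, import face_recog blocks until that loop ends. If the loop is wrapped in a function, say face_recog.run(), a small launcher can start it in a daemon thread while the pygame loop stays in the main thread (the import face_recog in game.py and the from game import * in face_recog.py should then be removed to avoid the circular import):

import threading

import face_recog          # assumes its capture loop is now wrapped in run()
from game import Game

# Run the face-recognition loop in a background daemon thread.
face_thread = threading.Thread(target=face_recog.run, daemon=True)
face_thread.start()

# Keep the pygame loop in the main thread.
Game().main()

Note that calling cv2.imshow from a background thread works on Windows but can misbehave on some platforms; if it does, swap the roles and keep the OpenCV window in the main thread instead.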

Save image stream with timestamp using OpenCV Python

I am using OpenCV with Python to save camera images in jpg and png format.
I am using a timestamp to save the images in sequence. My code sample follows, but the problem is that it only saves one image every time I execute it. What would be the best solution to save the image stream with timestamps?
import numpy as np
import cv2
import time

camera = cv2.VideoCapture(0)
time = time.time()  # timestamp

def saveJpgImage(frame):
    # process image
    img_name = "opencv_frame_{}.jpg".format(time)
    cv2.imwrite(img_name, frame)

def savePngImage():
    # process image
    img_name = "opencv_frame_{}.png".format(time)
    cv2.imwrite(img_name, frame)

def main():
    while True:
        ret, frame = cam.read()
        cv2.imshow("Camera Images", frame)
        if not ret:
            break
        k = cv2.waitKey(1)
        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        elif k % 256 == 32:
            saveJpgImage(frame)
            savePngImage(frame)

if __name__ == '__main__':
    main()
You're testing for a key press and only calling the save functions when that key is pressed. If you want to save frames continuously from the video loop instead, call the save functions inside the loop itself (and don't forget to keep the escape handling!).
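A sketch of the fix (my addition, not from the original answer): in the posted code the timestamp is taken once at start-up (time = time.time()), so every saved file reuses the same name and gets overwritten; there are also mismatches between camera/cam and the savePngImage signature. Taking a fresh timestamp inside the save call gives each frame a unique name, for example:

import cv2
import time

camera = cv2.VideoCapture(0)

def save_frame(frame, ext):
    # A fresh timestamp per call, so each file gets a unique name.
    img_name = "opencv_frame_{}.{}".format(time.time(), ext)
    cv2.imwrite(img_name, frame)

while True:
    ret, frame = camera.read()
    if not ret:
        break
    cv2.imshow("Camera Images", frame)
    k = cv2.waitKey(1)
    if k % 256 == 27:        # ESC: quit
        break
    elif k % 256 == 32:      # SPACE: save the current frame in both formats
        save_frame(frame, "jpg")
        save_frame(frame, "png")

camera.release()
cv2.destroyAllWindows()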

Screen Capture with OpenCV and Python-2.7

I'm using Python 2.7 and OpenCV 2.4.9.
I need to capture the current frame that is being shown to the user and load it as a cv::Mat object in Python.
Do you guys know a fast way to do it repeatedly?
I need something like what's done in the example below, which captures Mat frames from a webcam in a loop:
import cv2

cap = cv2.VideoCapture(0)
while(cap.isOpened()):
    ret, frame = cap.read()
    cv2.imshow('WindowName', frame)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
        break
The example uses the VideoCapture class to work with the image captured from the webcam. With VideoCapture.read() a new frame is read on each call and stored in a Mat object.
Could I load a "printscreen stream" into a VideoCapture object? Could I create a stream of my computer's screen with OpenCV in Python, without having to save and delete lots of .bmp files per second?
I need these frames to be Mat objects or NumPy arrays, so I can perform some computer-vision routines on them in real time.
Here is a solution I wrote using @Raoul's tips. I used the PIL ImageGrab module to grab the printscreen frames.
import numpy as np
from PIL import ImageGrab
import cv2

while(True):
    printscreen_pil = ImageGrab.grab()
    printscreen_numpy = np.array(printscreen_pil.getdata(), dtype='uint8')\
        .reshape((printscreen_pil.size[1], printscreen_pil.size[0], 3))
    cv2.imshow('window', printscreen_numpy)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
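As an aside (my note, not part of the original answer), np.array() can convert the PIL image directly, which is much faster than the getdata()/reshape round-trip:

# Direct conversion; produces the same H x W x 3 uint8 array.
printscreen_numpy = np.array(ImageGrab.grab())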
I had frame rate problems with other solutions; mss solved them. (Note: this uses an older mss API; newer versions replace get_pixels with sct.grab, as in the updated answer below.)
import numpy as np
import cv2
from mss import mss
from PIL import Image

mon = {'top': 160, 'left': 160, 'width': 200, 'height': 200}
sct = mss()

while 1:
    sct.get_pixels(mon)
    img = Image.frombytes('RGB', (sct.width, sct.height), sct.image)
    cv2.imshow('test', np.array(img))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
This is an updated version of the answer by @Neabfi:
import time
import cv2
import numpy as np
from mss import mss

mon = {'top': 160, 'left': 160, 'width': 200, 'height': 200}

with mss() as sct:
    # mon = sct.monitors[0]
    while True:
        last_time = time.time()
        img = sct.grab(mon)
        print('fps: {0}'.format(1 / (time.time() - last_time)))
        cv2.imshow('test', np.array(img))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
And to save to an mp4 video:
import time
import cv2
import numpy as np
from mss import mss

def record(name):
    with mss() as sct:
        # mon = {'top': 160, 'left': 160, 'width': 200, 'height': 200}
        mon = sct.monitors[0]
        name = name + '.mp4'
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        desired_fps = 30.0
        out = cv2.VideoWriter(name, fourcc, desired_fps,
                              (mon['width'], mon['height']))
        last_time = 0
        while True:
            img = sct.grab(mon)
            # cv2.imshow('test', np.array(img))
            if time.time() - last_time > 1. / desired_fps:
                last_time = time.time()
                destRGB = cv2.cvtColor(np.array(img), cv2.COLOR_BGRA2BGR)
                out.write(destRGB)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
        out.release()  # finalize the video file

record("Video")
Here's the Python 3 implementation.
This function finds the application in the list of running windows:
import win32gui
from PIL import ImageGrab

def capture_dynamic():
    toplist, winlist = [], []

    def enum_cb(hwnd, results):
        winlist.append((hwnd, win32gui.GetWindowText(hwnd)))

    win32gui.EnumWindows(enum_cb, toplist)
    wnd = [(hwnd, title) for hwnd, title in winlist if 'spotify' in title.lower()]
    if wnd:
        wnd = wnd[0]
        hwnd = wnd[0]
        bbox = win32gui.GetWindowRect(hwnd)
        img = ImageGrab.grab(bbox)
        return img
    else:
        return None
This function displays the images until the letter 'q' is pressed:
import cv2
import numpy as np

while(True):
    # Dynamic version
    screen_grab = capture_dynamic()
    if screen_grab is None:
        print("No Window Found! Please Try Again")
        break
    screen_grab = np.array(screen_grab)
    cv2.imshow('window', cv2.cvtColor(screen_grab, cv2.COLOR_BGR2RGB))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
Make sure the application you want to capture is in the foreground and not behind any other window.
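If it might be hidden, one option (my suggestion, reusing the pywin32 bindings from the function above) is to raise the window before grabbing:

# Bring the target window to the front before ImageGrab sees it.
win32gui.SetForegroundWindow(hwnd)
img = ImageGrab.grab(win32gui.GetWindowRect(hwnd))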
