The anti-cheat protection of the game I play prevents the following code from working. Whichever screen-capture method I use, I get a black screen; I have tried many alternatives without success. Do you have a solution or an alternative approach for this problem?
This Error output:
import cv2
import numpy as np
import win32gui, win32ui, win32con
class WindowCapture:
    """Capture screenshots of a specific window via the Win32 GDI API."""

    # capture-region properties
    w = 0
    h = 0
    hwnd = None
    cropped_x = 0
    cropped_y = 0
    offset_x = 0
    offset_y = 0

    def __init__(self, window_name):
        """Find the window by title and compute the capture region.

        Raises:
            Exception: if no window with the given title exists.
        """
        self.hwnd = win32gui.FindWindow(None, window_name)
        if not self.hwnd:
            raise Exception('Window not found: {}'.format(window_name))

        # get the window size
        window_rect = win32gui.GetWindowRect(self.hwnd)
        self.w = window_rect[2] - window_rect[0]
        self.h = window_rect[3] - window_rect[1]

        # account for the window border and titlebar and cut them off;
        # these values depend on the Windows theme/DPI -- tune per machine
        border_pixels = 1
        titlebar_pixels = 10
        self.w = self.w - border_pixels
        self.h = self.h - titlebar_pixels - border_pixels
        self.cropped_x = border_pixels
        self.cropped_y = titlebar_pixels
        # NOTE(review): offset_x/offset_y are never set in this version, so
        # get_screen_position() returns the input unchanged -- confirm whether
        # the offsets should be window_rect[0]+cropped_x / window_rect[1]+cropped_y.

    def get_screenshot(self):
        """Grab the window contents and return them as an (h, w, 3) BGR array."""
        wDC = win32gui.GetWindowDC(self.hwnd)
        dcObj = win32ui.CreateDCFromHandle(wDC)
        cDC = dcObj.CreateCompatibleDC()
        dataBitMap = win32ui.CreateBitmap()
        dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
        cDC.SelectObject(dataBitMap)
        cDC.BitBlt((0, 0), (self.w, self.h), dcObj,
                   (self.cropped_x, self.cropped_y), win32con.SRCCOPY)

        # convert the raw BGRA bytes into a numpy array.
        # FIX: np.fromstring is deprecated (removed in recent numpy);
        # np.frombuffer is the supported equivalent for binary data.
        signedIntsArray = dataBitMap.GetBitmapBits(True)
        img = np.frombuffer(signedIntsArray, dtype='uint8')
        img.shape = (self.h, self.w, 4)

        # free GDI resources so repeated calls do not leak handles
        dcObj.DeleteDC()
        cDC.DeleteDC()
        win32gui.ReleaseDC(self.hwnd, wDC)
        win32gui.DeleteObject(dataBitMap.GetHandle())

        # drop the alpha channel (cv.matchTemplate rejects 4-channel input)
        img = img[..., :3]
        # make the image C_CONTIGUOUS (and writable -- frombuffer views are
        # read-only) to avoid downstream OpenCV errors
        img = np.ascontiguousarray(img)
        return img

    def list_window_names(self):
        """Print the handle and title of every visible top-level window."""
        def winEnumHandler(hwnd, ctx):
            if win32gui.IsWindowVisible(hwnd):
                print(hex(hwnd), win32gui.GetWindowText(hwnd))
        win32gui.EnumWindows(winEnumHandler, None)

    def get_screen_position(self, pos):
        """Map (x, y) in screenshot space to absolute screen coordinates."""
        return (pos[0] + self.offset_x, pos[1] + self.offset_y)
# quick demo: capture one frame of the named window and display it
# (replace "Window Name" with the exact window title)
wincap = WindowCapture("Window Name")
screen = wincap.get_screenshot()
cv2.imshow("", screen)
cv2.waitKey(0)
Related
I'm trying to turn my screen capture into a thread, but I don't understand the error — I'm new to threads. Also, any idea why I get only a random still image, or a blank screen, in return when I initialize WindowCapture() with a window name?
main.py
# create the capture object (no name = whole screen) and start its worker thread
wincap = WindowCapture()
wincap.start()
windowcapture.py
import numpy as np
import win32gui, win32ui, win32con
from threading import Thread, Lock
class WindowCapture:
    """Threaded Win32 window capture.

    A background thread repeatedly grabs screenshots of the target window and
    publishes the latest frame in ``self.screenshot`` (guarded by a lock).
    """

    # threading properties
    stopped = True      # run() loops while this is False
    lock = None         # guards self.screenshot
    screenshot = None   # latest captured frame (BGR numpy array)

    # capture-region properties
    w = 0
    h = 0
    hwnd = None
    cropped_x = 0
    cropped_y = 0
    offset_x = 0
    offset_y = 0

    def __init__(self, window_name=None):
        """Locate the window (or the whole desktop) and compute the capture region.

        Args:
            window_name: exact window title; None captures the entire screen.

        Raises:
            Exception: if a name is given but no such window exists.
        """
        # create a thread lock object
        self.lock = Lock()

        # find the handle for the window we want to capture.
        # if no window name is given, capture the entire screen
        if window_name is None:
            self.hwnd = win32gui.GetDesktopWindow()
        else:
            self.hwnd = win32gui.FindWindow(None, window_name)
            if not self.hwnd:
                raise Exception('Window not found: {}'.format(window_name))

        # get the window size
        window_rect = win32gui.GetWindowRect(self.hwnd)
        self.w = window_rect[2] - window_rect[0]
        self.h = window_rect[3] - window_rect[1]

        # account for the window border and titlebar and cut them off
        # (values depend on the Windows theme/DPI)
        border_pixels = 8
        titlebar_pixels = 30
        self.w = self.w - (border_pixels * 2)
        self.h = self.h - titlebar_pixels - border_pixels
        self.cropped_x = border_pixels
        self.cropped_y = titlebar_pixels

        # set the cropped coordinates offset so we can translate screenshot
        # images into actual screen positions
        self.offset_x = window_rect[0] + self.cropped_x
        self.offset_y = window_rect[1] + self.cropped_y

    def get_screenshot(self):
        """Capture one frame of the window and return it as an (h, w, 3) BGR array."""
        # get the window image data
        wDC = win32gui.GetWindowDC(self.hwnd)
        dcObj = win32ui.CreateDCFromHandle(wDC)
        cDC = dcObj.CreateCompatibleDC()
        dataBitMap = win32ui.CreateBitmap()
        dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
        cDC.SelectObject(dataBitMap)
        cDC.BitBlt((0, 0), (self.w, self.h), dcObj, (self.cropped_x, self.cropped_y), win32con.SRCCOPY)

        # convert the raw data into a format opencv can read
        # dataBitMap.SaveBitmapFile(cDC, 'debug.bmp')
        # FIX: np.fromstring is deprecated (removed in recent numpy);
        # np.frombuffer is the supported equivalent for binary data.
        signedIntsArray = dataBitMap.GetBitmapBits(True)
        img = np.frombuffer(signedIntsArray, dtype='uint8')
        img.shape = (self.h, self.w, 4)

        # free GDI resources so repeated calls do not leak handles
        dcObj.DeleteDC()
        cDC.DeleteDC()
        win32gui.ReleaseDC(self.hwnd, wDC)
        win32gui.DeleteObject(dataBitMap.GetHandle())

        # drop the alpha channel, or cv.matchTemplate() will throw an error like:
        #   error: (-215:Assertion failed) ... in function 'cv::matchTemplate'
        img = img[..., :3]

        # make image C_CONTIGUOUS to avoid errors that look like:
        #   TypeError: an integer is required (got type tuple)
        # see https://github.com/opencv/opencv/issues/14866#issuecomment-580207109
        img = np.ascontiguousarray(img)
        return img

    # find the name of the window you're interested in:
    # https://stackoverflow.com/questions/55547940/how-to-get-a-list-of-the-name-of-every-open-window
    # FIX: the decorator was commented out as '#staticmethod'; without
    # @staticmethod, calling this through an instance passed self as hwnd.
    @staticmethod
    def list_window_names():
        """Print the handle and title of every visible top-level window."""
        def winEnumHandler(hwnd, ctx):
            if win32gui.IsWindowVisible(hwnd):
                print(hex(hwnd), win32gui.GetWindowText(hwnd))
        win32gui.EnumWindows(winEnumHandler, None)

    # WARNING: if you move the window being captured after execution has
    # started, this returns incorrect coordinates, because the window
    # position is only read in __init__.
    def get_screen_position(self, pos):
        """Map (x, y) in screenshot space to absolute screen coordinates."""
        return (pos[0] + self.offset_x, pos[1] + self.offset_y)

    # threading methods
    def start(self):
        """Start the background capture loop in a worker thread."""
        self.stopped = False
        t = Thread(target=self.run)
        t.start()

    def stop(self):
        """Signal the capture loop to exit."""
        self.stopped = True

    def run(self):
        """Worker loop: keep refreshing self.screenshot until stopped."""
        while not self.stopped:
            # get an updated image of the game
            screenshot = self.get_screenshot()
            # hold the lock only while publishing the new frame
            with self.lock:
                self.screenshot = screenshot
I wrote a Dino game with Arcade in Python. To move the dinosaur, I use MediaPipe hand detection with a webcam. The problem is that I cannot open both the webcam and the game window together.
To solve this I wanted to use the threading library, but the Game class would have to inherit from it, which I could not do because it already inherits from the Arcade library.
Please guide me. Thank you.
class Game(arcade.Window):
    """Dino runner window; the dinosaur jumps when the webcam sees a closed fist."""

    def __init__(self):
        # window geometry, animation clock, and gravity constant
        self.w = 900
        self.h = 400
        self.msec = 0
        self.gravity = 0.5
        super().__init__(self.w , self.h ,"Dino")
        arcade.set_background_color(arcade.color.WHITE)
        self.dino = Dino()
        self.grounds = arcade.SpriteList()
        self.cap = cv2.VideoCapture(0) # Use the webcam to detect hand movements
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands()
        self.mpDraw = mp.solutions.drawing_utils
        # tile the ground across the window (tiles appear to be 132 px wide)
        for i in range(0, self.w + 132, 132):
            ground = Ground(i, 50)
            self.grounds.append(ground)
        self.physics_engine = arcade.PhysicsEnginePlatformer(self.dino, self.grounds, self.gravity)

    def on_draw(self):
        # render the ground tiles, then the dinosaur on top
        arcade.start_render()
        for ground in self.grounds:
            ground.draw()
        self.dino.draw()

    def on_update(self, delta_time: float):
        # advance physics and the walk animation
        self.physics_engine.update()
        self.dino.update_animation()
        self.dino.center_x = 200
        self.msec += 0.5
        self.dino.show_walking(self.msec)
        # ___ start Recognize hand postures ___
        # NOTE(review): cap.read() blocks inside the game loop, which is why the
        # webcam and the game window fight each other; success is never checked.
        success,img = self.cap.read()
        imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
        results = self.hands.process(imgRGB)
        lmList = []
        if results.multi_hand_landmarks:
            for handlandmark in results.multi_hand_landmarks:
                for id,lm in enumerate(handlandmark.landmark):
                    # convert normalized landmark coords to pixel coords
                    h,w,_ = img.shape
                    cx,cy = int(lm.x*w),int(lm.y*h)
                    lmList.append([id,cx,cy])
                self.mpDraw.draw_landmarks(img,handlandmark,self.mpHands.HAND_CONNECTIONS)
        # Detect fisted hand
        # landmark ids: 4 = thumb tip, 8/12/16/20 = finger tips
        if lmList != []:
            x1,y1 = lmList[4][1], lmList[4][2]
            x2,y2 = lmList[8][1], lmList[8][2]
            x3,y3 = lmList[12][1], lmList[12][2]
            x4,y4 = lmList[16][1], lmList[16][2]
            x5,y5 = lmList[20][1], lmList[20][2]
            # pairwise fingertip distances; small distances = closed fist
            # (hypot is presumably imported from math at the top of the file -- confirm)
            length54 = hypot(x5-x4 , y5-y4)
            length43 = hypot(x4-x3 , y4-y3)
            length32 = hypot(x3-x2 , y3-y2)
            length21 = hypot(x2-x1 , y2-y1)
            # ___ end Recognize hand postures ___
            # Moving the dinosaur: jump when all fingertips are bunched together
            if length21 < 50 and length32 < 50 and length43 < 50 and length54 < 50:
                if self.physics_engine.can_jump():
                    self.dino.change_y = 15
        cv2.imshow('Image',img)
        # recycle ground tiles that scrolled off the left edge
        # NOTE(review): removing from self.grounds while iterating it can skip
        # elements -- iterate a copy if more than one tile can leave per frame.
        for ground in self.grounds:
            if ground.center_x < 0:
                self.grounds.remove(ground)
                self.grounds.append(Ground(self.w + 132 ,50))
class Ground(arcade.Sprite):
    """One scrolling ground tile; its texture is picked at random from seven images."""

    def __init__(self, width, height):
        super().__init__()
        # pick one of the seven tile textures
        self.picture = random.choice([
            'img/ground-0.png',
            'img/ground-1.png',
            'img/ground-2.png',
            'img/ground-3.png',
            'img/ground-4.png',
            'img/ground-5.png',
            'img/ground-6.png',
        ])
        self.texture = arcade.load_texture(self.picture)
        # initial position (the constructor args are really x/y coordinates)
        self.center_x = width
        self.center_y = height
        # constant leftward scroll, no vertical movement
        self.change_x = -6
        self.change_y = 0
        # drawn size in pixels
        self.width = 132
        self.height = 56
class Dino(arcade.AnimatedWalkingSprite):
    """The player sprite; textures are loaded once in the constructor."""

    def __init__(self):
        super().__init__()
        # walking animation frames
        self.walk_right_textures = [arcade.load_texture('img/dino-walk-0.png'), arcade.load_texture('img/dino-walk-1.png')]
        self.walk_up_textures = [arcade.load_texture('img/dino-walk-1.png')]
        # BUG FIX: the original assigned walk_down_textures twice -- the first
        # assignment (the 'img/dino-down-*.png' frames) was immediately
        # overwritten, so it was a dead store and has been removed here.
        # NOTE(review): if a ducking animation was intended, restore the
        # down-*.png frames instead of this one.
        self.walk_down_textures = [arcade.load_texture('img/dino-walk-0.png')]
        # start position and physics state
        self.center_x = 200
        self.center_y = 233
        self.change_x = 1
        self.change_y = 0
        self.scale = 0.3
        self.bent = 0

    def show_walking(self, s):
        """Swap walk frames based on the running timer *s* (stepped by 0.5).

        NOTE(review): s % 2 == 0 fires on even integers and s % 3 == 0 only on
        odd multiples of 3, so the second frame shows rarely -- confirm the
        intended cadence.
        """
        if s % 2 == 0:
            self.texture = arcade.load_texture('img/dino-walk-0.png')
        elif s % 3 == 0:
            self.texture = arcade.load_texture('img/dino-walk-1.png')
# entry point: create the window and hand control to arcade's event loop
if __name__ == '__main__':
    game = Game()
    arcade.run()
You can inherit your class from arcade.Window and add mediapipe code within. Simple example:
import cv2
import arcade
import mediapipe as mp
class Game(arcade.Window):
    """Minimal demo: a circle moves right whenever MediaPipe detects a hand."""

    def __init__(self):
        super().__init__(400, 300)
        # current x position of the circle
        self.x = 20
        # webcam feed used for hand detection
        self.cap = cv2.VideoCapture(0)
        # model_complexity=0 keeps detection fast enough to run every frame
        self.hands = mp.solutions.hands.Hands(model_complexity=0, min_detection_confidence=0.5, min_tracking_confidence=0.5)

    def on_draw(self):
        arcade.start_render()
        arcade.draw_circle_filled(center_x=self.x, center_y=150, radius=20, color=arcade.color.RED)
        arcade.draw_text(text='Move circle with a hand!', start_x=200, start_y=250, color=arcade.color.GREEN, font_size=24, anchor_x='center')

    def on_update(self, delta_time):
        # grab one webcam frame and run hand detection on it
        _, image = self.cap.read()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = cv2.resize(image, (400, 300))
        results = self.hands.process(image)
        # show the mirrored camera image in a separate OpenCV window
        cv2.imshow('Game', cv2.flip(image, 1))
        # any detected hand nudges the circle to the right
        if results.multi_hand_landmarks:
            self.x += 5


# run the demo
Game()
arcade.run()
Output:
I'm using python 3.9.6 and OpenCV 4.5.1
I'm trying to detect an objects on a template. My template is a real-time feed of my monitor and my objects are jpg's.
The issue: When I crop my template to speed up detection my mouse starts clicking in the wrong location.
This only happens after I've cropped my template. I think it's because I'm cropping my template at the wrong time in my script. My full monitor is (0 , 0, 1920, 1080) but I only want to capture [220:900, 270:1590]
I've followed the OpenCV documentation and a few online tutorials so far but I'm now stuck.
How do I click on img (third code block) rather than an incorrect off-set caused by cropping my template incorrectly?
I'm using win32gui to grab my template:
import numpy as np
import win32gui, win32ui, win32con
class WindowCapture:
    """Win32 window capture; with no window name it captures the whole desktop."""

    # capture-region properties
    w = 0
    h = 0
    hwnd = None
    cropped_x = 0
    cropped_y = 0
    offset_x = 0
    offset_y = 0

    def __init__(self, window_name=None):
        """Locate the window (or the whole desktop) and compute the capture region.

        Raises:
            Exception: if a name is given but no such window exists.
        """
        # find the handle for the window we want to capture.
        # if no window name is given, capture the entire screen
        if window_name is None:
            self.hwnd = win32gui.GetDesktopWindow()
        else:
            self.hwnd = win32gui.FindWindow(None, window_name)
            if not self.hwnd:
                raise Exception('Window not found: {}'.format(window_name))

        # get the window size
        window_rect = win32gui.GetWindowRect(self.hwnd)
        self.w = window_rect[2] - window_rect[0]
        self.h = window_rect[3] - window_rect[1]

        # account for the window border and titlebar and cut them off
        # (both 0 here: the desktop window has no border or titlebar)
        border_pixels = 0
        titlebar_pixels = 0
        self.w = self.w - (border_pixels * 2)
        self.h = self.h - titlebar_pixels - border_pixels
        self.cropped_x = border_pixels
        self.cropped_y = titlebar_pixels

        # set the cropped coordinates offset so we can translate screenshot
        # images into actual screen positions
        self.offset_x = window_rect[0] + self.cropped_x
        self.offset_y = window_rect[1] + self.cropped_y

    def get_screenshot(self):
        """Capture one frame and return it as an (h, w, 3) BGR numpy array."""
        # get the window image data
        wDC = win32gui.GetWindowDC(self.hwnd)
        dcObj = win32ui.CreateDCFromHandle(wDC)
        cDC = dcObj.CreateCompatibleDC()
        dataBitMap = win32ui.CreateBitmap()
        dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
        cDC.SelectObject(dataBitMap)
        cDC.BitBlt((0, 0), (self.w, self.h), dcObj, (self.cropped_x, self.cropped_y), win32con.SRCCOPY)

        # convert the raw data into a format opencv can read
        # dataBitMap.SaveBitmapFile(cDC, 'debug.bmp')
        # FIX: np.fromstring is deprecated (removed in recent numpy);
        # np.frombuffer is the supported equivalent for binary data.
        signedIntsArray = dataBitMap.GetBitmapBits(True)
        img = np.frombuffer(signedIntsArray, dtype='uint8')
        img.shape = (self.h, self.w, 4)

        # free GDI resources so repeated calls do not leak handles
        dcObj.DeleteDC()
        cDC.DeleteDC()
        win32gui.ReleaseDC(self.hwnd, wDC)
        win32gui.DeleteObject(dataBitMap.GetHandle())

        # drop the alpha channel (cv.matchTemplate rejects 4-channel input),
        # then make the result C_CONTIGUOUS and writable
        img = img[..., :3]
        img = np.ascontiguousarray(img)
        return img

    # FIX: the decorator was commented out as '#staticmethod'; without
    # @staticmethod, calling this through an instance passed self as hwnd.
    @staticmethod
    def list_window_names():
        """Print the handle and title of every visible top-level window."""
        def winEnumHandler(hwnd, ctx):
            if win32gui.IsWindowVisible(hwnd):
                print(hex(hwnd), win32gui.GetWindowText(hwnd))
        win32gui.EnumWindows(winEnumHandler, None)
And OpenCV and numpy for my object detection:
import cv2 as cv
import numpy as np
class Vision:
    """Finds a 'needle' template image inside a larger 'haystack' image
    using OpenCV template matching."""

    # properties
    needle_img = None   # template image loaded from disk
    needle_w = 0        # template width in pixels
    needle_h = 0        # template height in pixels
    method = None       # the cv.TM_* matching method in use

    # constructor
    def __init__(self, needle_img_path, method=cv.TM_CCORR_NORMED):
        # IMREAD_UNCHANGED keeps any alpha channel present in the template file
        self.needle_img = cv.imread(needle_img_path, cv.IMREAD_UNCHANGED)
        # Save the dimensions of the needle image
        self.needle_w = self.needle_img.shape[1]
        self.needle_h = self.needle_img.shape[0]
        # There are 6 methods to choose from:
        # TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED
        self.method = method

    def find(self, haystack_img, threshold=0.5, debug_mode=None):
        """Return a list of (x, y) centers of all matches scoring >= threshold.

        debug_mode: None, 'rectangles', or 'points'; the latter two draw onto
        haystack_img and show it in an OpenCV window.
        NOTE(review): the >= comparison assumes a similarity-style method;
        the TM_SQDIFF* methods would need the comparison inverted -- confirm
        before switching methods.
        """
        # run the OpenCV algorithm
        result = cv.matchTemplate(haystack_img, self.needle_img, self.method)
        # Get the all the positions from the match result that exceed our threshold
        locations = np.where(result >= threshold)
        # np.where returns (rows, cols); reverse and zip to get (x, y) pairs
        locations = list(zip(*locations[::-1]))
        rectangles = []
        for loc in locations:
            rect = [int(loc[0]), int(loc[1]), self.needle_w, self.needle_h]
            # Add every box to the list twice in order to retain single (non-overlapping) boxes
            # (groupRectangles discards clusters smaller than groupThreshold + 1)
            rectangles.append(rect)
            rectangles.append(rect)
        # Apply group rectangles
        rectangles, weights = cv.groupRectangles(rectangles, groupThreshold=1, eps=0.5)

        points = []
        if len(rectangles):
            line_color = (0, 255, 0)
            line_type = cv.LINE_4
            marker_color = (255, 0, 255)
            marker_type = cv.MARKER_CROSS
            # Loop over all the rectangles
            for (x, y, w, h) in rectangles:
                # Determine the center position
                center_x = x + int(w/2)
                center_y = y + int(h/2)
                # Save the points
                points.append((center_x, center_y))
                if debug_mode == 'rectangles':
                    # Determine the box position
                    top_left = (x, y)
                    bottom_right = (x + w, y + h)
                    # Draw the box
                    cv.rectangle(haystack_img, top_left, bottom_right, color=line_color,
                                 lineType=line_type, thickness=2)
                elif debug_mode == 'points':
                    # Draw the center point
                    cv.drawMarker(haystack_img, (center_x, center_y),
                                  color=marker_color, markerType=marker_type,
                                  markerSize=40, thickness=2)
        ############ DISPLAYS MATCHES #############
        if debug_mode:
            cv.imshow('Matches', haystack_img)
        return points
And then passing in both variables in a separate script here:
import cv2 as cv
import pyautogui as py
from windowcapture import WindowCapture
from vision import Vision

# initialize the WindowCapture class
# leave blank to capture the whole screen
haystack = WindowCapture()
# initialize the Vision class
needle = Vision('needle.jpg')

while True:
    # get an updated image of the game.
    # BUG FIX: the capture object is named 'haystack', not 'template' --
    # the original call template.get_screenshot() raised a NameError.
    screenshot = haystack.get_screenshot()
    # crop to the region of interest: rows 220-900 (y), cols 270-1590 (x)
    screenshotCropped = screenshot[220:900, 270:1590]
    # find match centers in the cropped image (coordinates are crop-relative)
    img = needle.find(screenshotCropped, 0.85, 'points')
    if img:
        py.moveTo(img[0])
The line causing the issue is: screenshotCropped = screenshot[220:900, 270:1590] If it's removed I click on the object correctly.
I also tried adding border_pixels and titlebar_pixels to allow me to crop directly from WindowCapture but I run into the same issue detailed above.
If I understand your code correctly, when you crop the image, you're not (yet) accounting for the X/Y offset introduced through that crop.
If I understand your example correctly, your code
screenshotCropped = screenshot[220:900, 270:1590]
is cropping from 220-900 along the y-axis (height) and 270-1590 along the x-axis (width), yes? If so, try
# crop bounds: x along the image width, y along the height
x_0, x_1 = 270,1590
y_0, y_1 = 220,900
# numpy images are indexed [row (y), column (x)]
screenshotCropped = screenshot[y_0:y_1, x_0:x_1]
...
if img:
    # translate crop-relative match coordinates back to full-screen
    # coordinates by re-adding the crop origin
    x_coord = img[0][0] + x_0
    y_coord = img[0][1] + y_0
    py.moveTo(x_coord,y_coord)
If your cropping region changes, update your (x_0, x_1, y_0, y_1) values accordingly, in both the crop operation and the py.moveTo operation.
I use OpenCV for automating games, so I need to take screenshots of what is happening on screen in real time. I use pywin32 for this while I am on Windows.
Is there a way to get a similar effect on macOS, or any other alternative?
class WindowCapture:
# properties
w = 0
h = 0
hwnd = None
cropped_x = 0
cropped_y = 0
offset_x = 0
offset_y = 0
# constructor
def __init__(self, window_name):
# find the handle for the window we want to capture
self.hwnd = win32gui.FindWindow(None, window_name)
if not self.hwnd:
raise Exception('Window not found: {}'.format(window_name))
# get the window size
window_rect = win32gui.GetWindowRect(self.hwnd)
self.w = window_rect[2] - window_rect[0]
self.h = window_rect[3] - window_rect[1]
# account for the window border and titlebar and cut them off
border_pixels = 8
titlebar_pixels = 30
self.w = self.w - (border_pixels * 2)
self.h = self.h - titlebar_pixels - border_pixels
self.cropped_x = border_pixels
self.cropped_y = titlebar_pixels
# set the cropped coordinates offset so we can translate screenshot
# images into actual screen positions
self.offset_x = window_rect[0] + self.cropped_x
self.offset_y = window_rect[1] + self.cropped_y
def get_screenshot(self):
# get the window image data
wDC = win32gui.GetWindowDC(self.hwnd)
dcObj = win32ui.CreateDCFromHandle(wDC)
cDC = dcObj.CreateCompatibleDC()
dataBitMap = win32ui.CreateBitmap()
dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
cDC.SelectObject(dataBitMap)
cDC.BitBlt((0, 0), (self.w, self.h), dcObj, (self.cropped_x, self.cropped_y), win32con.SRCCOPY)
# convert the raw data into a format opencv can read
#dataBitMap.SaveBitmapFile(cDC, 'debug.bmp')
signedIntsArray = dataBitMap.GetBitmapBits(True)
img = np.fromstring(signedIntsArray, dtype='uint8')
img.shape = (self.h, self.w, 4)
# free resources
dcObj.DeleteDC()
cDC.DeleteDC()
win32gui.ReleaseDC(self.hwnd, wDC)
win32gui.DeleteObject(dataBitMap.GetHandle())
# drop the alpha channel, or cv.matchTemplate() will throw an error like:
# error: (-215:Assertion failed) (depth == CV_8U || depth == CV_32F) && type == _templ.type()
# && _img.dims() <= 2 in function 'cv::matchTemplate'
img = img[...,:3]
# make image C_CONTIGUOUS to avoid errors that look like:
# File ... in draw_rectangles
# TypeError: an integer is required (got type tuple)
# see the discussion here:
# https://github.com/opencv/opencv/issues/14866#issuecomment-580207109
img = np.ascontiguousarray(img)
return img
# find the name of the window you're interested in.
# once you have it, update window_capture()
# https://stackoverflow.com/questions/55547940/how-to-get-a-list-of-the-name-of-every-open-window
def list_window_names(self):
def winEnumHandler(hwnd, ctx):
if win32gui.IsWindowVisible(hwnd):
print(hex(hwnd), win32gui.GetWindowText(hwnd))
win32gui.EnumWindows(winEnumHandler, None)
# translate a pixel position on a screenshot image to a pixel position on the screen.
# pos = (x, y)
# WARNING: if you move the window being captured after execution is started, this will
# return incorrect coordinates, because the window position is only calculated in
# the __init__ constructor.
def get_screen_position(self, pos):
return (pos[0] + self.offset_x, pos[1] + self.offset_y)
I use the above code on Windows (I found it on Stack Overflow, copy/pasted it, and edited it a little).
I am attempting to take fast screenshots ready for processing with PIL/Numpy (~0.01s per screenshot) with Python 3.6. Ideally the window would not need to be in the foreground, i.e. even when another window is covering it, the screenshot is still successful.
So far I've modified the code for python 3 from this question: Python Screenshot of inactive window PrintWindow + win32gui
However, all it gets is black images.
import win32gui
import win32ui
from ctypes import windll
from PIL import Image

# capture the Calculator window via PrintWindow, which can work even when
# the window is behind other windows (the question reports black images)
hwnd = win32gui.FindWindow(None, 'Calculator')

# Get window bounds
left, top, right, bot = win32gui.GetWindowRect(hwnd)
w = right - left
h = bot - top

# create a memory DC and a bitmap the size of the window
hwndDC = win32gui.GetWindowDC(hwnd)
mfcDC = win32ui.CreateDCFromHandle(hwndDC)
saveDC = mfcDC.CreateCompatibleDC()
saveBitMap = win32ui.CreateBitmap()
saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
saveDC.SelectObject(saveBitMap)

# ask the window to paint itself into our DC; nonzero result means success.
# NOTE(review): the last argument is the PrintWindow flags value -- confirm
# whether 1 here is the flag this application actually honors.
result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 1)
print(result)

bmp_info = saveBitMap.GetInfo()
bmp_str = saveBitMap.GetBitmapBits(True)
print(bmp_str)

# wrap the raw BGRX bytes in a PIL image
im = Image.frombuffer(
    'RGB',
    (bmp_info['bmWidth'], bmp_info['bmHeight']),
    bmp_str, 'raw', 'BGRX', 0, 1)

# free GDI resources before saving
win32gui.DeleteObject(saveBitMap.GetHandle())
saveDC.DeleteDC()
mfcDC.DeleteDC()
win32gui.ReleaseDC(hwnd, hwndDC)

if result == 1:
    im.save("screenshot.png")
This code worked for me with applications in background, not minimized.
import win32gui
import win32ui
import win32con  # FIX: required for win32con.SRCCOPY below; missing in the original

def background_screenshot(hwnd, width, height):
    """Save a width x height screenshot of the given window to screenshot.bmp.

    Works for windows covered by other windows, but not for minimized ones.
    """
    wDC = win32gui.GetWindowDC(hwnd)
    dcObj = win32ui.CreateDCFromHandle(wDC)
    cDC = dcObj.CreateCompatibleDC()
    dataBitMap = win32ui.CreateBitmap()
    dataBitMap.CreateCompatibleBitmap(dcObj, width, height)
    cDC.SelectObject(dataBitMap)
    cDC.BitBlt((0, 0), (width, height), dcObj, (0, 0), win32con.SRCCOPY)
    dataBitMap.SaveBitmapFile(cDC, 'screenshot.bmp')
    # free GDI resources so repeated calls do not leak handles
    dcObj.DeleteDC()
    cDC.DeleteDC()
    win32gui.ReleaseDC(hwnd, wDC)
    win32gui.DeleteObject(dataBitMap.GetHandle())

# FIX: 'windowname' was undefined in the original (NameError);
# set it to the exact title of the window you want to capture.
windowname = 'Put the exact window title here'
hwnd = win32gui.FindWindow(None, windowname)
background_screenshot(hwnd, 1280, 780)