Python bot script with CV2 library

Please can you help me troubleshoot this code? I've just started learning and can't find the solution. Thanks.
I have tried many changes, but I still have this problem.
The script is for fishing in a game: it recognizes an item in a screenshot and then runs some functions.
import time
import cv2
import mss
import numpy
import numpy as np
import pyautogui
from mss.windows import MSS as mss

template = cv2.imread("/Users/vk/Desktop/bot_screen/1.png", cv2.IMREAD_GRAYSCALE)
w, h = template.shape[::-1]
color_yellow = (0, 255, 255)
mon = {'top': 80, 'left': 350, 'width': 100, 'height': 100}

def process_image(original_image):
    processed_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
    processed_image = cv2.Canny(processed_image, threshold1=200, threshold2=300)
    return processed_image

def ss():
    op = 1
    with mss.mss() as sct:
        monitor = {"top": 40, "left": 0, "width": 800, "height": 640}
        while "Screen capturing":
            last_time = time.time()
            img = numpy.array(sct.grab(monitor))
            gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            res = cv2.matchTemplate(gray_frame, template, cv2.TM_CCOEFF_NORMED)
            loc = np.where(res >= 0.8)
            op += 1
            print(op)
            for pt in zip(*loc[::-1]):
                cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), (0, 255, 0), 3)
                for p in img:
                    pts = (pt[0], pt[1])
                    x = (pt[0])
                    y = (pt[1])
                    print(x)
                    if 100 < x < 490:
                        pyautogui.mouseDown(button='left')
                        time.sleep(2)
                        pyautogui.mouseUp(button='left')
                        x = 0
                        break
                    else:
                        continue
                    break
                else:
                    continue
                break
            key = cv2.waitKey(1)
            if cv2.waitKey(25) & 0xFF == ord("q"):
                cv2.destroyAllWindows()
            if op > 35:
                return

def screen_record():
    sct = mss.mss()
    last_time = time.time()
    while True:
        img = sct.grab(mon)
        print('loop took {} seconds'.format(time.time() - last_time))
        last_time = time.time()
        img = np.array(img)
        processed_image = process_image(img)
        mean = np.mean(processed_image)
        print('mean = ', mean)
        if mean <= float(0.11):
            print('SSSSSSSS ')
            pyautogui.click(button='left')
            break
            return
        else:
            time.sleep(0.01)
            continue
            return
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

while "1":
    time.sleep(1)
    pyautogui.moveTo(431, 175, duration=1)
    pyautogui.mouseDown(button='left')
    pyautogui.moveTo(450, 200, duration=1)
    pyautogui.mouseUp(button='left')
    time.sleep(2)
    screen_record()
    time.sleep(0.01)
    ss()
The error is shown in the attached screenshot (not reproduced here).

You're calling mss.mss(), which doesn't exist here: your last import, from mss.windows import MSS as mss, rebinds the name mss from the module to the MSS class. When I used the same import as you, just doing sct = mss() worked fine.
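The alternative is to drop that import and keep calling mss.mss(). A minimal sketch of that option (the monitor values are just the ones from the question):

import numpy
import mss   # keep only the package import; remove "from mss.windows import MSS as mss"

monitor = {"top": 40, "left": 0, "width": 800, "height": 640}
with mss.mss() as sct:                      # mss.mss() picks the right backend for your OS
    img = numpy.array(sct.grab(monitor))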

Related

PyDirectInput mouse movements are erratic and inaccurate

I am trying to write a program that plays the game War Thunder. It detects objects on the screen, but it does not aim properly: when an object is detected, the mouse moves erratically and inaccurately.
import threading
import time

import cv2
import mss
import numpy as np
import pydirectinput

# model is assumed to be a detection model loaded earlier in the script (not shown in the question);
# the results.render()/results.xyxyn API matches a YOLOv5 torch.hub model

def aim_at_object(target_x, target_y):
    current_x, current_y = pydirectinput.position()
    dx = target_x - current_x
    dy = target_y - current_y
    pydirectinput.move(int(dx), int(dy))
    # set_cursor_pos_func = ctypes.windll.user32.SetCursorPos(ctypes.c_int(target_x), ctypes.c_int(target_y))

def detect_objects():
    with mss.mss() as sct:
        monitor = {'top': 50, 'left': 50, 'width': 1280, 'height': 720}
        while True:
            t = time.time()
            img = np.array(sct.grab(monitor))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            results = model(img)
            results.render()
            out = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            labels, cord_thres = results.xyxyn[0][:, -1].cpu().numpy(), results.xyxyn[0][:, :-1].cpu().numpy()
            try:
                x = (cord_thres[0, 0] + cord_thres[0, 2]) / 2
                y = (cord_thres[0, 1] + cord_thres[0, 3]) / 2
                target_x, target_y = x * 1280, y * 720
                aim_thread = threading.Thread(target=aim_at_object, args=(target_x, target_y))
                aim_thread.start()
                cv2.circle(out, (int(target_x), int(target_y)), 10, (0, 255, 0), -1)
                cv2.imshow('s', out)
                print("Detection and aiming successful")
            except IndexError:
                print("No object detected")
            cv2.imshow('s', out)
            print('fps: {}'.format(1 / (time.time() - t)))
            if cv2.waitKey(1) == 27:
                break
        cv2.destroyAllWindows()

if __name__ == '__main__':
    detect_thread = threading.Thread(target=detect_objects)
    detect_thread.start()
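There is no answer in this thread, but one common cause of erratic movement in a setup like this is that a new aim thread is started on every frame, so several relative pydirectinput.move() calls overlap and fight each other. A sketch (my suggestion, not from the thread) that serializes aiming through a single worker and only ever acts on the newest target:

import queue
import threading

import pydirectinput

target_queue = queue.Queue(maxsize=1)   # hold only the most recent target

def aim_worker():
    while True:
        target_x, target_y = target_queue.get()            # blocks until a target arrives
        current_x, current_y = pydirectinput.position()     # same call the question already uses
        pydirectinput.move(int(target_x - current_x), int(target_y - current_y))

def submit_target(target_x, target_y):
    # called from the detection loop instead of starting a new thread per frame
    try:
        target_queue.get_nowait()        # drop a stale target so moves never pile up
    except queue.Empty:
        pass
    target_queue.put((target_x, target_y))

threading.Thread(target=aim_worker, daemon=True).start()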

Different results when calculating with floats instead of integers

Why does using float instead of int give me different results when all of my inputs are integers?
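In short: Python integers are exact at any size, while floats are 64-bit IEEE-754 doubles with a 53-bit significand, so integer inputs larger than 2**53 can change value the moment they are converted to float. A minimal illustration:

a = 10**17 + 1
b = 10**17
print(a - b)                 # 1   -> integer arithmetic is exact
print(float(a) - float(b))   # 0.0 -> both values round to the same double
print(float(a) == float(b))  # True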
from deepface import DeepFace
import cv2
import matplotlib.pyplot as plt
import webbrowser
import turtle

faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
pause = 0

def happy():
    print("Vani, Gỗ đàn hương, Hoa nhài và Hương thảo")  # vanilla, sandalwood, jasmine and rosemary
    webbrowser.open("https://www.youtube.com/watch?v=LjhCEhWiKXk&list=PL1VuYyZcPYIJTP3W_x0jq9olXviPQlOe1")
    tur = turtle.Screen()
    tur.bgcolor("pink")
    tur.title("Light")
    turtle.done()

def sad():
    print("Sả chanh, Cam Bergamot, Húng quế và Nhục đậu khấu")  # lemongrass, bergamot orange, basil and nutmeg
    webbrowser.open("https://www.youtube.com/watch?v=LanCLS_hIo4&list=PLWOvS8Nliu2x6LvCNnRydpT-owR8zxXB7")
    tur = turtle.Screen()
    tur.bgcolor("yellow")
    tur.title("Light")
    turtle.done()

def neutral():
    print("Gừng, Bưởi và Chanh")  # ginger, grapefruit and lemon
    webbrowser.open("https://www.youtube.com/watch?v=ru0K8uYEZWw&list=PLVgakZ6MigxxNhXZae5cALEW588-sMQn6")
    tur = turtle.Screen()
    tur.bgcolor("orange")
    tur.title("Light")
    turtle.done()

def angry():
    print("Oải hương, Hoa hồng, Cúc la mã và Ylang-Ylang")  # lavender, rose, chamomile and ylang-ylang
    webbrowser.open("https://www.youtube.com/watch?v=qq-RGFyaq0U&list=PLefKpFQ8Pvy5aCLAGHD8Zmzsdljos-t2l")
    tur = turtle.Screen()
    tur.bgcolor("green")
    tur.title("Light")
    turtle.done()

while True:
    ret, frame = cap.read()
    try:
        result = DeepFace.analyze(frame, actions=['emotion'])
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.1, 4)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, result['dominant_emotion'], (50, 50), font, 3, (0, 0, 255), 2, cv2.LINE_4)
        if result['dominant_emotion'] == "happy" and pause == 0:
            happy()
        if result['dominant_emotion'] in ["sad", "tear"] and pause == 0:  # the original "== [...]" compared a string to a list and was always False
            sad()
        if result['dominant_emotion'] == "angry" and pause == 0:
            angry()
        if result['dominant_emotion'] == "neutral" and pause == 0:
            neutral()
    except:
        pass
    cv2.imshow('Emotional Expression', frame)
    if cv2.waitKey(2) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
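One more note on this snippet: recent DeepFace releases return a list of result dictionaries from DeepFace.analyze (one per detected face), in which case result['dominant_emotion'] raises and the bare except silently swallows it. A small, hedged helper that tolerates both return types (enforce_detection=False simply avoids an exception when no face is found):

from deepface import DeepFace

def dominant_emotion(frame):
    # older DeepFace versions return a dict, newer ones a list of dicts (one per face)
    analysis = DeepFace.analyze(frame, actions=['emotion'], enforce_detection=False)
    if isinstance(analysis, list):
        analysis = analysis[0]
    return analysis['dominant_emotion']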

Why does program freeze when using time.sleep()

So I am working on an OpenCV project that detects whether a certain hand sign has been made. That part works.
I also wanted it to wait a few seconds so it does not count accidental movements. But when I use time.sleep() or a start-time/end-time approach, it either freezes my program and plays the clip regardless of the if conditions, or, worse, it never executes the if statement at all.
Here is the code snippet I used. By the way, I have already tried time.sleep(); it just froze the program and always played the clip regardless of whether my if statement was satisfied.
Is there any way to resolve this issue?
import cv2
import time
import os
import HandTrackingModule as htm
from playsound import playsound

wCam, hCam = 1920, 1080
cap = cv2.VideoCapture(2)
cap.set(3, wCam)
cap.set(4, hCam)
frame = cap.read()
pTime = 0
detector = htm.handDetector(detectionCon=1)
Player1 = []
Player2 = []

while 1 > 0:
    success, img = cap.read()
    img = detector.findHands(img)
    lmlist = detector.findPosition(img, handNo=0, draw=False)
    if len(lmlist) != 0:
        Player1 = []
        Player2 = []
        if (lmlist[4][1] < lmlist[3][1] and lmlist[8][2] < lmlist[6][2] and lmlist[20][2] < lmlist[18][2] and lmlist[12][2] > lmlist[10][2] and lmlist[16][2] > lmlist[14][2]) == False:
            StartTime = time.time()
        if lmlist[4][1] < lmlist[3][1] and lmlist[8][2] < lmlist[6][2] and lmlist[20][2] < lmlist[18][2] and lmlist[12][2] > lmlist[10][2] and lmlist[16][2] > lmlist[14][2]:
            EndTime = time.time()
            Eyetime = EndTime - StartTime
            if Eyetime > 5:
                Player1.append("hello")
                playsound(r'C:\Users\haris\Documents\GitHub\Haz3-jolt\Pong_with_opencv\venv\notw.mp3')
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, f'FPS: {int(fps)}', (400, 70), cv2.FONT_HERSHEY_COMPLEX, 3, (255, 0, 0), 3)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
I also have a helper module it imports, called HandTrackingModule:
import cv2
import mediapipe as mp
import time

class handDetector():
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        # print(results.multi_hand_landmarks)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                # print(id, lm)
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                # print(id, cx, cy)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
        return lmList

    def Marks(self, frame):
        myHands = []
        frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = self.hands.process(frameRGB)
        if results.multi_hand_landmarks != None:
            for handLandMarks in results.multi_hand_landmarks:
                myHand = []
                for landMark in handLandMarks.landmark:
                    myHand.append((int(landMark.x * width), int(landMark.y * height)))
                myHands.append(myHand)
        return myHands

width = 1920
height = 1080

def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(2)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList = detector.findPosition(img)
        if len(lmList) != 0:
            print(lmList[4])
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
The GUI (the imshow window) only works as long as waitKey/pollKey runs continuously or frequently.
When you sleep(), you starve the GUI: the imshow window can't update or handle any events.
If you need to "sleep", give waitKey a suitable integer argument in milliseconds instead.
Be aware that waitKey can return before the time is up, e.g. if all windows were closed or if a key was pressed.
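Applied to the asker's loop, that means tracking when the sign first appeared instead of sleeping, and letting waitKey provide the per-frame delay. A rough sketch, with the camera frame and the finger checks replaced by placeholders:

import time
import numpy as np
import cv2

HOLD_SECONDS = 5
hold_start = None                                   # None means the sign is not currently being held

while True:
    img = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for the camera frame
    gesture_detected = False                        # stand-in for the question's finger checks

    if gesture_detected:
        if hold_start is None:
            hold_start = time.time()                # sign just appeared, start timing
        elif time.time() - hold_start > HOLD_SECONDS:
            print("sign held long enough")          # play the sound here
            hold_start = None                       # reset so it doesn't retrigger
    else:
        hold_start = None                           # sign dropped, reset the timer

    cv2.imshow("Image", img)
    if cv2.waitKey(30) & 0xFF == ord('q'):          # waitKey doubles as the per-frame delay
        break

cv2.destroyAllWindows()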

OpenCV object detecting and save videofile

I want to detect objects and save the video, but the saved video is only 6 KB or 0 KB and it can't be played.
If I remove this line
x, y, width, height, area = stats[index]
it saves fine.
Do you know why, and is there a solution?
import cv2
import time
import numpy as np

cap = cv2.VideoCapture("rtsp://admin:admin#128.1.1.110:554")
width = int(cap.get(3))
height = int(cap.get(4))
fcc = cv2.VideoWriter_fourcc(*'XVID')
recording = False
fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=200, detectShadows=0)

while(1):
    ret, frame = cap.read()
    hms = time.strftime('%H_%M_%S', time.localtime())
    fgmask = fgbg.apply(frame)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
    for index, centroid in enumerate(centroids):
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        if np.any(np.isnan(centroid)):
            continue
        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])
        if area > 200:
            cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
            cv2.putText(frame, str(area), (centerX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    cv2.imshow('frame', frame)
    k = cv2.waitKey(1) & 0xff
    if k == ord('r') and recording is False:
        path = 'test_' + str(hms) + '.avi'
        print('recording start')
        writer = cv2.VideoWriter(path, fcc, 30.0, (width, height))
        recording = True
    if recording:
        writer.write(frame)
    if k == ord('e'):
        print('recording end')
        recording = False
        writer.release()

cap.release()
cv2.destroyAllWindows()
I think this will solve your problem
# importing the module
import cv2
import numpy as np

# reading the video
source = cv2.VideoCapture(0)  # add your URL instead of "0"

# We need to set resolutions,
# so convert them from float to integer.
frame_width = int(source.get(3))
frame_height = int(source.get(4))
recording = False
fcc = cv2.VideoWriter_fourcc(*'XVID')
size = (frame_width, frame_height)
fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=200, detectShadows=0)
result = cv2.VideoWriter('output.avi', fcc, 30, size)

# running the loop
while True:
    # extracting the frames
    ret, frame = source.read()
    fgmask = fgbg.apply(frame)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
    for index, centroid in enumerate(centroids):
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        if np.any(np.isnan(centroid)):
            continue
        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])
        if area > 200:
            cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
            cv2.putText(frame, str(area), (centerX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    # displaying the video
    cv2.imshow("Live", frame)
    k = cv2.waitKey(1) & 0xff
    if k == ord('r') and recording is False:
        print('recording start')
        recording = True
    if recording:
        result.write(frame)
    if k == ord('e'):
        print('recording end')
        recording = False
        result.release()

# closing the window
cv2.destroyAllWindows()
source.release()
But unfortunately, I could not get the hms timestamp into the output file name.
You can try that yourself.
If this was helpful to you, please give it a 👍.
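If you do want the timestamp in the file name, one option (my sketch, not part of the answer above) is to create the VideoWriter only at the moment recording starts:

import time
import cv2

fcc = cv2.VideoWriter_fourcc(*'XVID')
size = (640, 480)          # use (frame_width, frame_height) from your capture here

def start_recording():
    # name the file after the time at which recording actually starts
    hms = time.strftime('%H_%M_%S', time.localtime())
    path = 'test_' + hms + '.avi'
    print('recording to', path)
    return cv2.VideoWriter(path, fcc, 30.0, size)

# inside the capture loop, instead of a single pre-created writer:
#   if k == ord('r') and writer is None:
#       writer = start_recording()
#   if writer is not None:
#       writer.write(frame)
#   if k == ord('e') and writer is not None:
#       writer.release()
#       writer = None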
Actually, you need to delete some of the code.
    cv2.imshow('MultiTracker', frame)

    # quit on ESC button
    if cv2.waitKey(1) & 0xFF == 27:  # Esc pressed
        break

    # k = cv2.waitKey(1) & 0xff
    # if k == ord('r') and recording is False:
    #     print('recording start')
    #     recording = True
    # if recording:
    result.write(frame)
    # if k == ord('e'):
    #     print('recording end')
    #     recording = False
    #     result.release()

result.release()
cv2.destroyAllWindows()
cap.release()
It works for me. The reason the file is 6 KB is that you start the writer but never append frames to the output AVI file.
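Another likely culprit in the original code (my reading, not stated in either answer): the line x, y, width, height, area = stats[index] overwrites the width and height that were read from the capture, so the VideoWriter created after pressing 'r' gets the last blob's size instead of the frame size and produces an unplayable file. Giving the blob geometry its own names avoids the clash; a minimal sketch using a webcam in place of the RTSP stream:

import cv2
import numpy as np

cap = cv2.VideoCapture(0)                                 # stand-in for the RTSP URL
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))      # capture size, used only for the writer
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fcc = cv2.VideoWriter_fourcc(*'XVID')
writer = cv2.VideoWriter('test.avi', fcc, 30.0, (frame_width, frame_height))
fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=200, detectShadows=0)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
    for index, centroid in enumerate(centroids):
        if np.any(np.isnan(centroid)):
            continue
        x, y, w, h, area = stats[index]        # blob geometry gets its own names
        if area > 200:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255))
    writer.write(frame)                        # the frame size still matches the writer
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('e'):
        break

writer.release()
cap.release()
cv2.destroyAllWindows()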

how to cancel the target object of MultiTracker in opencv-python

I'm using opencv-python to complete my undergraduate graduation project, and I need to use MultiTracker to implement multi-target detection and tracking. However, I cannot remove a target object after it disappears from the screen. I'm not a student majoring in digital image processing, and the problem bothers me a lot. Can anyone help me? The code is as follows:
import sys
import cv2
from random import randint

trackerTypes = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
trackerType = trackerTypes[6]  # KCF and MOSSE are both acceptable; KCF tracks best, MOSSE is fastest

def adjust_frame(frame):
    rows, cols, ch = frame.shape
    M = cv2.getRotationMatrix2D((cols, rows), 1, 1)  # the three parameters are the rotation center, rotation angle and scale
    frame = cv2.warpAffine(frame, M, (cols, rows))
    frame = frame[580:670, 470:1030]
    frame = cv2.resize(frame, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)
    return frame

def createTrackerByName(trackerType):
    # create a tracker from its name
    if trackerType == trackerTypes[0]:
        tracker = cv2.TrackerBoosting_create()
    elif trackerType == trackerTypes[1]:
        tracker = cv2.TrackerMIL_create()
    elif trackerType == trackerTypes[2]:
        tracker = cv2.TrackerKCF_create()
    elif trackerType == trackerTypes[3]:
        tracker = cv2.TrackerTLD_create()
    elif trackerType == trackerTypes[4]:
        tracker = cv2.TrackerMedianFlow_create()
    elif trackerType == trackerTypes[5]:
        tracker = cv2.TrackerGOTURN_create()
    elif trackerType == trackerTypes[6]:
        tracker = cv2.TrackerMOSSE_create()
    elif trackerType == trackerTypes[7]:
        tracker = cv2.TrackerCSRT_create()
    else:
        tracker = None
        print('Incorrect tracker name')
        print('Available tracker name')
        for t in trackerTypes:
            print(t)
    return tracker

print('Default tracking algorithm is CSRT \n'
      'Available tracking algorithms are:\n')
for t in trackerTypes:
    print(t, end=' ')

videoPath = r'E:\python files\vehicle identification\4.MOV'  # path of the video file to load
cap = cv2.VideoCapture(videoPath)  # create a VideoCapture to read the video file

# read the first frame
ret, frame = cap.read()
frame = adjust_frame(frame)
# exit if the video file cannot be read
if not ret:
    print('Failed to read video')
    sys.exit(1)

# selection boxes
bboxes = []
colors = []

# OpenCV's selectROI does not support selecting multiple objects in Python,
# so call it in a loop until all objects have been selected
while True:
    # draw a bounding box over the object; with fromCenter set to False, selectROI draws the box from the top-left corner rather than from the center
    bbox = cv2.selectROI('MultiTracker', frame)  # returns four values: x, y, w, h
    bboxes.append(bbox)
    colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
    print("Press q to quit selecting boxes and start tracking")
    print("Press any other key to select next object")
    k = cv2.waitKey(0)
    if k == 113:  # q is pressed
        break

print('Selected bounding boxes {}'.format(bboxes))

# Initialize the MultiTracker
# There are two ways to initialize a multitracker:
# 1. tracker = cv2.MultiTracker("CSRT")
#    every tracker added to this multitracker uses CSRT as the default algorithm
# 2. tracker = cv2.MultiTracker()
#    no default algorithm is specified
# Initialize the MultiTracker with a tracking algorithm:
# specify the tracker type and create the multitracker object
multiTracker = cv2.MultiTracker_create()

# initialize the multitracker
for bbox in bboxes:
    multiTracker.add(createTrackerByName(trackerType), frame, bbox)

# process the video and track the objects
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frame = adjust_frame(frame)
    timer = cv2.getTickCount()  # timing point 1
    # get the updated positions of the objects in subsequent frames
    ret, boxes = multiTracker.update(frame)
    # draw the tracked objects
    for i, newbox in enumerate(boxes):
        p1 = (int(newbox[0]), int(newbox[1]))  # x, y coordinates
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(frame, p1, p2, colors[i], 2, 1)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)  # timing point 2
    cv2.putText(frame, "FPS : " + str(int(fps)), (10, 13), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 170, 50), 2)
    cv2.putText(frame, trackerType + " Tracker", (10, 28), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 170, 50), 2)
    cv2.imshow('MultiTracker', frame)
    k = cv2.waitKey(1)
    if k == 27:
        break
    elif k == ord('p'):  # press p to add a new target
        bbox = cv2.selectROI('MultiTracker', frame)  # returns four values: x, y, w, h
        bboxes.append(bbox)
        colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
        multiTracker.add(createTrackerByName(trackerType), frame, bbox)
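As far as I know, the MultiTracker API has no method to remove a single tracker once it has been added. A common workaround is to rebuild the MultiTracker from scratch with only the boxes you still want, for example dropping boxes whose centers have left the frame. A hedged sketch against the same API the question uses (cv2.MultiTracker_create / createTrackerByName from the code above); the keep_box criterion is just an illustrative choice:

import cv2

def keep_box(box, frame_shape):
    # illustrative criterion: keep a target only while its center is still inside the frame
    x, y, w, h = box
    cx, cy = x + w / 2, y + h / 2
    rows, cols = frame_shape[:2]
    return 0 <= cx < cols and 0 <= cy < rows

def rebuild_multitracker(frame, boxes, trackerType):
    # recreate the MultiTracker with only the boxes we still want to track
    multi = cv2.MultiTracker_create()
    for box in boxes:
        multi.add(createTrackerByName(trackerType), frame, tuple(box))
    return multi

# inside the tracking loop, after: ret, boxes = multiTracker.update(frame)
#   kept = [tuple(b) for b in boxes if keep_box(b, frame.shape)]
#   if len(kept) != len(boxes):
#       multiTracker = rebuild_multitracker(frame, kept, trackerType)

If you rebuild like this, remember to filter the colors list with the same condition so the box indices still line up when drawing.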
