I'm making a smart mirror with OpenCV. I use Raspbian, but I have a problem.
# Import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4,
                                     minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:, 2:] += rects[:, :2]
    return rects

def draw_rects(img, rects, color):
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

# Initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
cascade = cv2.CascadeClassifier("opencv-3.3.0/data/haarcascades/haarcascade_frontalface_alt.xml")

# Allow the camera to warm up
time.sleep(0.1)
scan = 0

# Capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image
    img = frame.array
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    rects = detect(gray, cascade)
    if len(rects) != 0:
        scan = 1
    vis = img.copy()
    draw_rects(vis, rects, (0, 255, 0))
    # show the frame
    cv2.imshow("Frame", vis)
    key = cv2.waitKey(1) & 0xFF
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    # break from the loop once a face has been detected
    if scan == 1:
        break
So: in a virtual environment, I want to take the variable (scan) into my own file (.py).
But I get an error: No module named cv2.
How can I use the variable (scan) in my file? Help me, please.
This is my file (.py):

#smartmirror.py
from __future__ import print_function
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from Tkinter import *
import locale
# (rest of the code omitted)
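For what it's worth: the "No module named cv2" error usually means OpenCV is not installed inside the virtualenv itself (run pip install opencv-python inside the venv, or create the venv with --system-site-packages so it can see a system-wide build). To share the variable between files, a common pattern is to wrap the camera loop in a function and import that. A minimal sketch, assuming the detection code above is saved as a hypothetical face_scan.py:

# face_scan.py -- hypothetical module wrapping the detection loop above
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

def wait_for_face(cascade_path="opencv-3.3.0/data/haarcascades/haarcascade_frontalface_alt.xml"):
    """Block until a face is detected, then return scan (1)."""
    cascade = cv2.CascadeClassifier(cascade_path)
    camera = PiCamera()
    camera.resolution = (640, 480)
    rawCapture = PiRGBArray(camera, size=(640, 480))
    time.sleep(0.1)  # allow the camera to warm up
    scan = 0
    for frame in camera.capture_continuous(rawCapture, format="bgr",
                                           use_video_port=True):
        gray = cv2.cvtColor(frame.array, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        rects = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=4,
                                         minSize=(30, 30))
        rawCapture.truncate(0)  # clear the stream for the next frame
        if len(rects) > 0:
            scan = 1
            break
    camera.close()
    return scan

Then smartmirror.py only needs from face_scan import wait_for_face and scan = wait_for_face().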
I am doing a project to visualise hand movement from a wearable device (gloves) on a screen. I am using MediaPipe to get hand landmarks; this is to get synthetic data for my project. Now I am planning to use Blender to visualise and animate the hand movement. My questions are as follows:
Are there any alternatives to this method?
How do I export these points (as a CSV file) and map them onto my Blender model?
Note: I am a novice in both Python and Blender.
Any and all help would be appreciated. Thanks in advance.
I used MediaPipe to get the coordinates.
My code for MediaPipe:
import cv2
import mediapipe as mp
import time
import uuid
import os
import numpy as np

cap = cv2.VideoCapture(0)

mpHands = mp.solutions.hands
hands = mpHands.Hands(max_num_hands=1)
mpDraw = mp.solutions.drawing_utils

pTime = 0
cTime = 0

while True:
    success, img = cap.read()
    #img = cv2.resize(img, (680,420))
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    imgRGB.flags.writeable = False
    results = hands.process(imgRGB)
    imgRGB.flags.writeable = True
    imgRGB = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    print(results.multi_hand_landmarks)
    if results.multi_hand_landmarks:
        for handLms in results.multi_hand_landmarks:
            for id, lm in enumerate(handLms.landmark):
                # to get width and height of the image
                h, w, c = img.shape
                # to get center points
                cx, cy = int(lm.x*w), int(lm.y*h)
                print(id, cx, cy)
                #lms= lm.append([id, cx, cy])
                if id == 0:
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
            mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS,
                                  mpDraw.DrawingSpec(color=(201, 122, 76), thickness=2, circle_radius=2),
                                  mpDraw.DrawingSpec(color=(121, 44, 250), thickness=4, circle_radius=4),)
    cTime = time.time()
    fps = 1/(cTime-pTime)
    pTime = cTime
    cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 125, 0), 2)
    #cv2.imwrite(os.path.join('Output Images', '{}.jpg'.format(uuid.uuid1())),img)
    cv2.imshow('Image', img)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
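To get the landmarks into Blender, one workable route is to write them to a CSV file first and then load that file from a script inside Blender. Below is a minimal sketch of the export side (not the code above): one row per (frame, landmark id), with the file name hand_landmarks.csv and the 300-frame cap chosen purely for illustration.

import csv
import cv2
import mediapipe as mp

cap = cv2.VideoCapture(0)
mpHands = mp.solutions.hands

with mpHands.Hands(max_num_hands=1) as hands, \
        open("hand_landmarks.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["frame", "id", "x", "y", "z"])  # normalized coordinates
    for frame_idx in range(300):  # record ~300 frames, then stop
        success, img = cap.read()
        if not success:
            break
        results = hands.process(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        if results.multi_hand_landmarks:
            for handLms in results.multi_hand_landmarks:
                for id, lm in enumerate(handLms.landmark):
                    writer.writerow([frame_idx, id, lm.x, lm.y, lm.z])

cap.release()

Inside Blender, the CSV can then be read with the same csv module and the rows used to keyframe the locations of 21 empties, one per landmark; parenting a hand rig to those empties is one common way to drive the animation.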
I am currently working on a project which makes use of a Raspberry Pi and its Camera Module v2.1.
I need to scan some barcodes with the camera, for which I am using the OpenCV and pyzbar libraries.
I am running into some trouble with the image that OpenCV is returning; examples below:
Image returned from running libcamera-hello: (image not shown)
Image returned from running my script: (image not shown)
As you can see, the images are very different; the OpenCV image is more zoomed in.
I've tried resizing the image and even changing the size of the frame, but it doesn't seem to help; the image just gets stretched!
Does anyone have any ideas on why this might be happening?
My script for capturing the images is below:
import cv2
from pyzbar import pyzbar
from gpiozero import Button
from readBarcodeData import read_text

button = Button(25)

def read_barcodes(frame):
    barcodes = pyzbar.decode(frame)
    for barcode in barcodes:
        x, y, w, h = barcode.rect
        barcode_info = barcode.data.decode('utf-8')
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        with open("barcode_result.txt", mode='w') as file:
            file.write(str(barcode_info).replace("'", ""))
    return frame

def main():
    while True:
        if button.is_pressed:
            camera = cv2.VideoCapture(0)
            ret, frame = camera.read()
            while ret:
                ret, frame = camera.read()
                frame = read_barcodes(frame)
                cv2.imshow('Barcode Scanner', frame)
                if cv2.waitKey(0) & 0xFF == 27:
                    break
            break
    camera.release()
    cv2.destroyAllWindows()
    read_text()

if __name__ == '__main__':
    main()
EDIT:
I have also tried capturing an image using the following code:
import cv2

vid = cv2.VideoCapture(0)

while True:
    # Capture the video frame
    ret, frame = vid.read()
    # Display the resulting frame
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# After the loop, release the cap object
vid.release()
# Destroy all the windows
cv2.destroyAllWindows()
But I still get a cropped image.
EDIT 2:
Returned properties from the video capture:
CV_CAP_PROP_FRAME_WIDTH: '640.0'
CV_CAP_PROP_FRAME_HEIGHT : '480.0'
[ WARN:0] global /tmp/pip-wheel-j62hpwu1/opencv-python_19cf39855c924932a2df50dd2b502cd2/opencv/modules/videoio/src/cap_v4l.cpp (1911) getProperty VIDEOIO(V4L2:/dev/video0): Unable to get camera FPS
CAP_PROP_FPS : '-1.0'
CAP_PROP_POS_MSEC : '911170.05'
CAP_PROP_FRAME_COUNT : '-1.0'
CAP_PROP_BRIGHTNESS : '-1.0'
CAP_PROP_CONTRAST : '-1.0'
CAP_PROP_SATURATION : '-1.0'
CAP_PROP_HUE : '-1.0'
CAP_PROP_GAIN : '-1.0'
CAP_PROP_CONVERT_RGB : '1.0'
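The properties above suggest the V4L2 backend opened the sensor in its default 640x480 mode, which on the Camera Module v2.1 reads out a cropped region of the sensor rather than the full field of view. One thing worth trying (a sketch, not guaranteed on every driver) is to explicitly request the full still resolution before reading:

import cv2

cap = cv2.VideoCapture(0)
# Ask for the full still resolution of the Camera Module v2.1 (3280x2464);
# the driver falls back to the nearest mode it actually supports.
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 3280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 2464)
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

ret, frame = cap.read()
if ret:
    cv2.imwrite("full_frame.jpg", frame)
cap.release()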
I managed to "fix" this by using the PiCamera library to capture the image and then run it through cv2:
Code for reading in PiCamera image is below:
import cv2
from time import sleep
from pyzbar import pyzbar
from gpiozero import Button
from picamera.array import PiRGBArray
import picamera
from readBarcodeData import read_text

button = Button(25)

def read_barcodes(frame):
    barcodes = pyzbar.decode(frame)
    for barcode in barcodes:
        x, y, w, h = barcode.rect
        barcode_info = barcode.data.decode('utf-8')
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        with open("barcode_result.txt", mode='w') as file:
            file.write(str(barcode_info).replace("'", ""))
    return frame

def main():
    while True:
        if button.is_pressed:
            with picamera.PiCamera() as camera:
                rawCapture = PiRGBArray(camera)
                #camera.resolution = (3280, 2464)
                camera.start_preview()
                #sleep(1)
                camera.capture(rawCapture, format="bgr")
                img = rawCapture.array
            #camera = cv2.VideoCapture(0)
            #ret, frame = camera.read()
            ret = True
            while ret:
                #ret, frame = img
                frame = read_barcodes(img)
                #cv2.imshow('Barcode Scanner', frame)
                #print(frame.shape)
                #if cv2.waitKey(0) & 0xFF == 27:
                #    break
                break
            cv2.destroyAllWindows()
            read_text()

if __name__ == '__main__':
    main()
The image captured by PiCamera seems to return the full image with no cropping so works a treat.
I am working on a "Face Recognition" project.
I have done the recognition part; my project is based on 3 code files:
Face Detection (takes roughly 50 sample images of a person)
Face Train (trains on the captured images)
Face Recognition (recognizes trained faces in real time)
Now what I have to do: if an unknown person who is not in the dataset is detected more than 3 times on camera, the face recognition program should wait for a while, and the face detection program should run, take samples of that person, and train on them, so that the next time the person is on camera he/she will be recognizable. Here is the code for my face recognition:
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
import picamera
import numpy as np
import pickle
import RPi.GPIO as GPIO
from time import sleep
from subprocess import call
import time
import datetime
import boto3
from botocore.client import Config
import serial

# port = serial.Serial('/dev/ttyUSB0',9600)
now = datetime.datetime.now()
currentDate = str(now.month) + "_" + str(now.day) + "_" + str(now.year) + "_" + str(now.hour)
cloudpath = 'Videos/cctvfootage' + currentDate + '.mp4'

with open('labels', 'rb') as f:
    dict = pickle.load(f)
    f.close()

# Set up the camera
camera = PiCamera()
camera.resolution = (600, 500)
camera.framerate = 30
rawCapture = PiRGBArray(camera, size=(600, 500))

# Load prebuilt model for frontal face detection
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

# Create Local Binary Patterns Histograms for face recognition
recognizer = cv2.face.createLBPHFaceRecognizer()

# Load the trained model
recognizer.load("trainer.yml")

font = cv2.FONT_HERSHEY_SIMPLEX

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'X264')
out = cv2.VideoWriter(cloudpath, fourcc, 2.0, (camera.resolution))

lastRes = ''
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    frame = frame.array
    cv2.rectangle(frame, (0, 0), (455, 30), (0, 0, 0), thickness=-1)
    cv2.putText(frame, time.asctime(), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), lineType=cv2.LINE_AA, thickness=2)
    data = open(cloudpath, 'rb')
    # Convert the captured frame into grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    # For each face in faces
    for (x, y, w, h) in faces:
        # Crop the face region
        roiGray = gray[y:y+h, x:x+w]
        # Recognize which ID the face belongs to
        id_, conf = recognizer.predict(roiGray)
        for name, value in dict.items():
            if value == id_:
                break
        print(name)
        print(conf)
        # if confidence <= 70 the door will open, otherwise it won't
        # Put text describing who is in the picture
        if conf <= 70:
            cv2.rectangle(frame, (x-20, y-20), (x+w+20, y+h+20), (0, 255, 0), 4)
            cv2.putText(frame, name, (x, y-40), font, 1, (255, 255, 255), 3)
        else:
            cv2.rectangle(frame, (x-20, y-20), (x+w+20, y+h+20), (0, 255, 0), 4)
            cv2.putText(frame, "Unknown", (x, y-40), font, 1, (255, 255, 255), 3)
    cv2.imshow('frame', frame)
    out.write(frame)
    key = cv2.waitKey(1)
    rawCapture.truncate(0)
    # if the Esc key is pressed, close the cam
    if key == 27:
        print("Video Saved Successfully..")
        break

cv2.destroyAllWindows()
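As for the "unknown person seen more than 3 times" requirement, here is a minimal sketch of the counting logic, assuming it is called right after recognizer.predict(...) in the loop above and that the other two programs can be launched with subprocess.call (the script names are placeholders):

from subprocess import call

UNKNOWN_LIMIT = 3
unknown_count = 0

def handle_prediction(conf, threshold=70):
    """Count consecutive unknown faces; capture and retrain past the limit."""
    global unknown_count
    if conf > threshold:        # LBPH: larger distance = worse match = unknown
        unknown_count += 1
    else:
        unknown_count = 0       # reset whenever a known face is seen
    if unknown_count > UNKNOWN_LIMIT:
        unknown_count = 0
        call(["python", "face_detection.py"])  # take ~50 samples (placeholder name)
        call(["python", "face_train.py"])      # retrain on the new samples

Because call blocks until each script finishes, the recognition loop pauses while the samples are taken and trained, which matches the "wait for a while" behaviour described above.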
Did you try deepface? Its stream function accesses your webcam and applies real-time face recognition as well as facial attribute analysis (age, gender and emotion prediction). You can switch the streaming source from webcam to a video file as well.
#!pip install deepface
from deepface import DeepFace
DeepFace.stream("my_db")
Here, my_db is a folder that stores my facial database.
I wrote a program that plays a warning sound (an audio file) when a face is detected.
However, the sound always starts playing 10~30 seconds after the detection condition is triggered.
If the call is taken out of the conditional and the sound is played as background music, there is no problem.
May I ask how to solve this?
Thanks!
The code I use to play the sound:
winsound.PlaySound('1.wav', winsound.SND_FILENAME|winsound.SND_ASYNC)
The full code:
# -*- coding: utf-8 -*-
import dlib
import cv2
import imutils
import winsound
import os
import multiprocessing
import time
from winsound import SND_ASYNC
import pygame
from pygame import mixer

# choose camera
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# change size
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)

# Get the default face detector
detector = dlib.get_frontal_face_detector()
# Load the 68-landmark model with shape_predictor; this detector finds facial feature points
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

# While the camera is on, run detection on each frame
#pygame.mixer.init()
#mixer.music.load('incoming.mp3')
#mixer.music.play(-1)
#winsound.PlaySound('incoming.wav', winsound.SND_FILENAME | winsound.SND_ASYNC)
while cap.isOpened():
    # Read frame information
    ret, frame = cap.read()
    # Detect faces
    face_rects, scores, idx = detector.run(frame, 0)
    # Retrieve the detection results
    for i, d in enumerate(face_rects):
        x1 = d.left()
        y1 = d.top()
        x2 = d.right()
        y2 = d.bottom()
        text = " %2.2f ( %d )" % (scores[i], idx[i])
        # Draw a rectangle around the detected face
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 4, cv2.LINE_AA)
        # Mark the detection score and the face-direction sub-detector number
        cv2.putText(frame, text, (x1, y1), cv2.FONT_HERSHEY_DUPLEX,
                    0.7, (255, 255, 255), 1, cv2.LINE_AA)
        # play sound
        if scores[i] > 0.3 and idx[i] == 0:
            print(text)
            #pygame.mixer.pre_init(48000, 16, 2, 4096)
            winsound.PlaySound('incoming.wav', winsound.SND_FILENAME | winsound.SND_ASYNC)
    # Output to screen
    cv2.imshow("Face Detection", frame)
    # Exit when the ESC key is pressed
    if cv2.waitKey(10) == 27:
        break

# Free memory
cap.release()
# Close all windows
cv2.destroyAllWindows()
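A likely cause of the delay: a new winsound.PlaySound(..., SND_ASYNC) call cancels whatever sound is already playing, and the call above fires on every frame in which a face scores over 0.3, so the file keeps being restarted before it becomes audible. A sketch of a simple cooldown that triggers the sound at most once every few seconds (the 3-second value is illustrative):

import time
import winsound

last_played = 0.0
COOLDOWN = 3.0  # seconds between alerts; tune to taste

def play_alert(path='incoming.wav'):
    """Start the warning sound unless it was triggered very recently."""
    global last_played
    now = time.time()
    if now - last_played >= COOLDOWN:
        winsound.PlaySound(path, winsound.SND_FILENAME | winsound.SND_ASYNC)
        last_played = now

Replacing the inline PlaySound call in the loop with play_alert() leaves the detection logic unchanged.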
And here is the gprof2dot profile: (image not shown)
I want the Raspberry Pi B+ to detect circles in images. I've been looking for some code, and I tried to use it on the Raspberry Pi. Here is the code I ended up with, but the main problem is that it doesn't show the image with the detected objects (I am using a tennis ball as an example); it just shows the image without the drawn circle and rectangle.
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2
import os
import numpy as np

os.system('sudo modprobe bcm2835-v4l2')

h = 200
w = 300

camera = PiCamera()
camera.resolution = (w, h)
camera.framerate = 24
rawCapture = PiRGBArray(camera, size=(w, h))

time.sleep(0.1)

for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    image_RGB = frame.array
    copy_RGB = image_RGB.copy()
    grey = cv2.cvtColor(image_RGB, cv2.COLOR_BGR2GRAY)
    img_circles = None
    img_circles = cv2.HoughCircles(grey, cv2.cv.CV_HOUGH_GRADIENT, 1.2, 100)
    if img_circles is not None:
        img_circles = np.round(img_circles[0, :]).astype("int")
        for (x, y, r) in img_circles:
            cv2.circle(copy_RGB, (x, y), r, (0, 255, 0), 4)
            cv2.rectangle(copy_RGB, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
    cv2.imshow("Copy with Detected Object", copy_RGB)
    key = cv2.waitKey(1) & 0xFF
    rawCapture.truncate(0)
    if key == ord("q"):
        break
Any help is appreciated.
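If the circles are simply not being found, two things are worth checking: smoothing the grayscale frame before the transform usually helps, and on OpenCV 3 and later the constant is spelled cv2.HOUGH_GRADIENT rather than cv2.cv.CV_HOUGH_GRADIENT. A small standalone sketch (the file name and parameter values are illustrative, not tuned):

import cv2
import numpy as np

img = cv2.imread("tennis_ball.jpg")   # hypothetical test image
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
grey = cv2.medianBlur(grey, 5)        # smoothing suppresses false circles

circles = cv2.HoughCircles(grey, cv2.HOUGH_GRADIENT, dp=1.2, minDist=100,
                           param1=100, param2=30, minRadius=10, maxRadius=150)
if circles is not None:
    for (x, y, r) in np.round(circles[0, :]).astype("int"):
        cv2.circle(img, (x, y), r, (0, 255, 0), 4)
    cv2.imwrite("detected.jpg", img)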