I wrote a program that plays a warning sound (an audio file) when a face is detected.
However, the sound file only starts playing 10 to 30 seconds after the detection condition is triggered.
If I move the call out of the conditional and just play it as background music, there is no problem.
How can I solve this?
Thanks!
The code I use to play the sound:
winsound.PlaySound('1.wav', winsound.SND_FILENAME|winsound.SND_ASYNC)
The full code:
# -*- coding: utf-8 -*-
import dlib
import cv2
import imutils
import winsound
import os
import multiprocessing
import time
from winsound import SND_ASYNC
import pygame
from pygame import mixer
# choose camera
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# change frame size
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
#Get the default face detector
detector = dlib.get_frontal_face_detector()
# Load the 68-point facial landmark model with shape_predictor
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
#When the camera is turned on, each frame is detected
#pygame.mixer.init()
#mixer.music.load('incoming.mp3')
#mixer.music.play(-1)
#winsound.PlaySound('incoming.wav', winsound.SND_FILENAME| winsound.SND_ASYNC )
while cap.isOpened():
    # Read frame information
    ret, frame = cap.read()
    if not ret:
        break
    # Detect faces
    face_rects, scores, idx = detector.run(frame, 0)
    # Go through the detection results
    for i, d in enumerate(face_rects):
        x1 = d.left()
        y1 = d.top()
        x2 = d.right()
        y2 = d.bottom()
        text = " %2.2f ( %d )" % (scores[i], idx[i])
        # Draw a rectangle around the detected face
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 4, cv2.LINE_AA)
        # Mark the detection score and the face-direction sub-detector index
        cv2.putText(frame, text, (x1, y1), cv2.FONT_HERSHEY_DUPLEX,
                    0.7, (255, 255, 255), 1, cv2.LINE_AA)
        # play sound
        if scores[i] > 0.3 and idx[i] == 0:
            print(text)
            #pygame.mixer.pre_init(48000, 16, 2, 4096)
            winsound.PlaySound('incoming.wav', winsound.SND_FILENAME | winsound.SND_ASYNC)
    # Output to screen
    cv2.imshow("Face Detection", frame)
    # Exit when the ESC key is pressed
    if cv2.waitKey(10) == 27:
        break
#Free memory
cap.release()
#Close all windows
cv2.destroyAllWindows()
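One thing worth trying (a sketch, not a confirmed fix): debounce the call so PlaySound is not re-triggered on every frame in which a face is detected, and hand playback to a worker thread so the detection loop never waits on the file. The cooldown value and the helper name play_warning below are assumptions:

import threading
import time
import winsound

last_played = 0.0
COOLDOWN = 3.0  # assumed minimum gap, in seconds, between warnings

def play_warning(path='incoming.wav'):
    # runs on a worker thread so loading the file never stalls the detection loop
    winsound.PlaySound(path, winsound.SND_FILENAME)

# inside the detection loop, instead of calling PlaySound directly:
if scores[i] > 0.3 and idx[i] == 0:
    now = time.time()
    if now - last_played > COOLDOWN:
        last_played = now
        threading.Thread(target=play_warning, daemon=True).start()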
Related
I am working on a "Face Recognition" project.
I have done the recognition part. My project is based on three code files:
Face Detection (takes samples of a person, approx. 50)
Face Train (trains on the captured images)
Face Recognition (recognizes the trained images in real time)
Now what I have to do is: if an unknown person who is not in the dataset is detected more than 3 times by the camera, the face recognition program should wait for a while, and the face detection should run to take samples of that person and train on them, so the next time that person is on camera he or she will be recognizable. Here is the code for my Face Recognition:
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
import picamera
import numpy as np
import pickle
import RPi.GPIO as GPIO
from time import sleep
from subprocess import call
import time
import datetime
import boto3
from botocore.client import Config
import serial
# port = serial.Serial('/dev/ttyUSB0',9600)
now = datetime.datetime.now()
currentDate = str(now.month) + "_" + str(now.day) + "_" + str(now.year)+ "_" + str(now.hour)
cloudpath ='Videos/cctvfootage'+currentDate+'.mp4'
with open('labels', 'rb') as f:
    dict = pickle.load(f)
#setup the camera
camera =PiCamera()
camera.resolution = (600,500)
camera.framerate = 30
rawCapture = PiRGBArray(camera, size=(600, 500))
# Load prebuilt model for Frontal Face detection
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# Create Local Binary Patterns Histograms recognizer for face recognition
recognizer = cv2.face.createLBPHFaceRecognizer()
# Load the trained model
recognizer.load("trainer.yml")
font = cv2.FONT_HERSHEY_SIMPLEX
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'X264')
out = cv2.VideoWriter(cloudpath, fourcc, 2.0, (camera.resolution))
lastRes=''
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    frame = frame.array
    cv2.rectangle(frame, (0, 0), (455, 30), (0, 0, 0), thickness=-1)
    cv2.putText(frame, time.asctime(), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), lineType=cv2.LINE_AA, thickness=2)
    data = open(cloudpath, 'rb')
    # Convert the captured frame into grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    # For each face in faces
    for (x, y, w, h) in faces:
        # Crop the face region
        roiGray = gray[y:y+h, x:x+w]
        # Recognize which ID the face belongs to
        id_, conf = recognizer.predict(roiGray)
        for name, value in dict.items():
            if value == id_:
                break
        print(name)
        print(conf)
        # if confidence <= 70 the door will open, otherwise it will not
        # Put text describing who is in the picture
        if conf <= 70:
            cv2.rectangle(frame, (x-20, y-20), (x+w+20, y+h+20), (0, 255, 0), 4)
            cv2.putText(frame, name, (x, y-40), font, 1, (255, 255, 255), 3)
        else:
            cv2.rectangle(frame, (x-20, y-20), (x+w+20, y+h+20), (0, 255, 0), 4)
            cv2.putText(frame, "Unknown", (x, y-40), font, 1, (255, 255, 255), 3)
    cv2.imshow('frame', frame)
    out.write(frame)
    key = cv2.waitKey(1)
    rawCapture.truncate(0)
    # if the ESC key is pressed, close the camera
    if key == 27:
        print("Video Saved Successfully..")
        break
cv2.destroyAllWindows()
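For the "unknown person seen more than 3 times" behaviour, a minimal sketch is to count consecutive "Unknown" predictions and start saving face crops once the count passes the limit; the folder unknown_samples/, the thresholds, and the counter names below are assumptions, and the actual retraining would still be done by your Face Train script:

import os
import cv2

unknownCount = 0
UNKNOWN_LIMIT = 3       # "more than 3 times", as described in the question
SAMPLES_NEEDED = 50     # roughly the sample count used by the Face Detection script
sampleCount = 0
os.makedirs("unknown_samples", exist_ok=True)

# inside the `for (x, y, w, h) in faces:` loop, after recognizer.predict():
if conf > 70:           # same "unknown" condition as in the code above
    unknownCount += 1
else:
    unknownCount = 0

if unknownCount > UNKNOWN_LIMIT and sampleCount < SAMPLES_NEEDED:
    # save grayscale face crops that the Face Train script can pick up later
    sampleCount += 1
    cv2.imwrite("unknown_samples/sample_{}.jpg".format(sampleCount), roiGray)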
Have you tried deepface? Its stream function accesses your webcam and applies real-time face recognition as well as facial attribute analysis (age, gender, and emotion prediction). You can also switch the stream source from the webcam to a video file.
#!pip install deepface
from deepface import DeepFace
DeepFace.stream("my_db")
Here, my_db is a folder that stores my facial database.
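If you only need to match a single image rather than a live stream, a similar one-off lookup should also work (a sketch assuming the same my_db folder and a query image named img.jpg; the exact return type of find varies between deepface versions):

from deepface import DeepFace

# search one image against every identity stored in the my_db folder
results = DeepFace.find(img_path="img.jpg", db_path="my_db")
print(results)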
I found some code that can scan a barcode using the Raspberry Pi Camera V2.1.
It works as expected and can detect a barcode when I present one to the camera. But if I move the camera around a little, there is a lag in the video. I tried increasing camera.framerate, but that doesn't do anything, and neither does changing the resolution. Even if I remove the dec() function, the video still looks laggy.
How can I improve the camera framerate so it does not lag?
Also, the code opens a window where I can see the video. For now that is useful for debugging, but how can I stop the Pi from opening the video window later on?
from ftplib import FTP
from pyzbar.pyzbar import decode
import os, sys, cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
import imutils, time
detectedBarcode = False

def dec(frame):
    global detectedBarcode
    x = decode(frame)
    for i in x:
        detectedBarcode = True
        (x, y, w, h) = i.rect
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        barcodeData = i.data.decode("utf-8")
        barcodeType = i.type
        print(barcodeData, type(barcodeData))
        #sys.exit()
        return (barcodeData, barcodeType, 1)
    return ('', '', 0)

def cameraReader():
    fourcc = cv2.VideoWriter_fourcc(*'X264')
    camera = PiCamera()
    camera.resolution = (1296, 730)
    camera.framerate = 30
    rawCapture = PiRGBArray(camera)
    cv2.namedWindow("QR Scanner", cv2.WINDOW_NORMAL)
    global detectedBarcode
    avg = None
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=False):
        image = frame.array
        cv2.line(image, (650, 0), (650, 1000), (0, 255, 0), 2)
        x, y, p = dec(image)
        cv2.imshow("QR Scanner", image)
        if cv2.waitKey(2) & 0xFF == ord('q'):
            break
        rawCapture.truncate(0)
    #cap.release()
    cv2.destroyAllWindows()

cameraReader()
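Two changes that may reduce the lag (a sketch reusing the imports and dec() from the code above, not a guaranteed fix): capture with use_video_port=True so frames come from the Pi's faster video pipeline instead of the still port, and only attempt decoding on every Nth frame; a flag can also keep the preview window from opening later. SHOW_WINDOW and DECODE_EVERY are assumed names:

SHOW_WINDOW = True      # set to False later to run without the preview window
DECODE_EVERY = 5        # only attempt barcode decoding on every 5th frame

def cameraReader():
    camera = PiCamera()
    camera.resolution = (1296, 730)
    camera.framerate = 30
    rawCapture = PiRGBArray(camera, size=camera.resolution)
    if SHOW_WINDOW:
        cv2.namedWindow("QR Scanner", cv2.WINDOW_NORMAL)
    frameCount = 0
    # use_video_port=True trades a little image quality for much faster capture
    for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
        image = frame.array
        frameCount += 1
        if frameCount % DECODE_EVERY == 0:
            dec(image)
        if SHOW_WINDOW:
            cv2.imshow("QR Scanner", image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        rawCapture.truncate(0)
    cv2.destroyAllWindows()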
Hello. I have a program that can capture and detect a person's face, but I want to be able to turn off the capturing through a button on a web page. I already have an "on" button that just calls the Python script, but I've had no luck creating an "off" button.
I am using a Raspberry Pi 3, with a Pyramid cookiecutter for the web pages.
This is my views.py:
from pyramid.view import view_config
import RPi.GPIO as GPIO
import os
from .models import *
from bson import ObjectId
from pyramid.httpexceptions import HTTPFound
import sys
from mongoengine import *
GPIO.setmode(GPIO.BOARD)
GPIO.setup(10, GPIO.OUT)
@view_config(route_name='home', renderer='templates/mytemplate.jinja2')
def my_view(request):
    if 'switch' in request.params:
        raise SystemExit
    if 'blink' in request.params:
        os.system("python3 /home/pi/Desktop/pi-drowsiness-detection/pi_detect_drowsiness.py -c haarcascade_frontalface_default.xml -p shape_predictor_68_face_landmarks.dat")
    if 'register-now' in request.params:
        print("REGISTER")
        firstname = request.params['fname']
        lastname = request.params['lname']
        username = request.params['username']
        password = request.params['password']
        if AppUsers.objects(username=username).first():
            return {"error": "USERNAME ALREADY EXISTS"}
        x = AppUsers(firstname=firstname, lastname=lastname, username=username, password=password)
        x.save()
    return {'project': 'web-app-namin'}

def app_users(request):
    finame = str(request.POST.get('firstname'))
    laname = str(request.POST.get('lastname'))
    uname = str(request.POST.get('username'))
    if AppUsers.objects(username=uname).first():
        return {"error": "USERNAME ALREADY EXISTS"}
    x = AppUsers(firstname=finame, lastname=laname, username=uname)
    x.save()
    return {"response": "DATA ADDED"}
models.py
from mongoengine import *
from datetime import datetime
import hashlib
#connect to a mongodb database
connect('database_namin')
def hash_mo_to(raw_string):
    '''Hashes a string using the md5 hashing algorithm.
    Returns a 32-character hashed string.'''
    hasher = hashlib.md5()
    hasher.update(raw_string.encode('ascii'))
    return str(hasher.hexdigest())

# default admin credentials for the system
class Admin(DynamicDocument):
    username = StringField(default='admin')
    password = StringField(default=hash_mo_to('admin'))

# schema for mobile application users set by the admin...
class AppUsers(DynamicDocument):
    firstname = StringField()
    lastname = StringField()
    username = StringField()
    password = StringField(default=hash_mo_to("1234"))
pi_detect_drowsiness.py
# import the necessary packages
import RPi.GPIO as GPIO
from imutils.video import VideoStream
from imutils import face_utils
import numpy as np
import argparse
import imutils
import time
import dlib
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
from PIL import Image
led = 10
led2 = 13
GPIO.setmode(GPIO.BOARD)
GPIO.setup(led, GPIO.OUT)
GPIO.setup(led2, GPIO.OUT)
GPIO.output(led, False)
def euclidean_dist(ptA, ptB):
    # compute and return the euclidean distance between the two points
    return np.linalg.norm(ptA - ptB)

def eye_aspect_ratio(eye):
    # compute the euclidean distances between the two sets of
    # vertical eye landmarks (x, y)-coordinates
    A = euclidean_dist(eye[1], eye[5])
    B = euclidean_dist(eye[2], eye[4])
    # compute the euclidean distance between the horizontal
    # eye landmark (x, y)-coordinates
    C = euclidean_dist(eye[0], eye[3])
    # compute the eye aspect ratio
    ear = (A + B) / (2.0 * C)
    # return the eye aspect ratio
    return ear
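# Illustrative numbers (not from the original script): as the eyelids close, the
# vertical distances shrink, e.g. A = 5, B = 5 with C = 20 gives
# ear = (5 + 5) / (2.0 * 20) = 0.25, which is below the EYE_AR_THRESH of 0.3
# defined below and therefore counts toward the drowsiness frame counter.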
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--cascade", required=False,
help = "/home/pi/Desktop/pi-drowsiness-detection/haarcascade_frontalface_default.xml")
ap.add_argument("-p", "--shape-predictor", required=False,
help="path to facial landmark predictor")
ap.add_argument("-a", "--alarm", type=int, default=0,
help="boolean used to indicate if TraffHat should be used")
args = vars(ap.parse_args())
# check to see if we are using GPIO/TrafficHat as an alarm
if args["alarm"] > 0:
from gpiozero import TrafficHat
th = TrafficHat()
print("[INFO] using TrafficHat alarm...")
# define two constants, one for the eye aspect ratio to indicate
# blink and then a second constant for the number of consecutive
# frames the eye must be below the threshold for to set off the
# alarm
EYE_AR_THRESH = 0.3
EYE_AR_CONSEC_FRAMES = 16
# initialize the frame counter as well as a boolean used to
# indicate if the alarm is going off
COUNTER = 0
ALARM_ON = False
# load OpenCV's Haar cascade for face detection (which is faster than
# dlib's built-in HOG detector, but less accurate), then create the
# facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = cv2.CascadeClassifier("/home/pi/Desktop/pi-drowsiness-detection/haarcascade_frontalface_default.xml")
predictor = dlib.shape_predictor("/home/pi/Desktop/pi-drowsiness-detection/shape_predictor_68_face_landmarks.dat")
# grab the indexes of the facial landmarks for the left and
# right eye, respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
# start the video stream thread
print("[INFO] starting video stream thread...")
# vs = VideoStream(src=0).start()
vs = VideoStream(usePiCamera=True).start()
time.sleep(1.0)
# loop over frames from the video stream
while True:
    # grab the frame from the threaded video file stream, resize
    # it, and convert it to grayscale
    frame = vs.read()
    frame = imutils.resize(frame, width=450)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale frame
    rects = detector.detectMultiScale(gray, scaleFactor=1.1,
        minNeighbors=5, minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)
    # loop over the face detections
    for (x, y, w, h) in rects:
        # construct a dlib rectangle object from the Haar cascade
        # bounding box
        rect = dlib.rectangle(int(x), int(y), int(x + w),
            int(y + h))
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        # extract the left and right eye coordinates, then use the
        # coordinates to compute the eye aspect ratio for both eyes
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        # average the eye aspect ratio together for both eyes
        ear = (leftEAR + rightEAR) / 2.0
        # compute the convex hull for the left and right eye, then
        # visualize each of the eyes
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
        # check to see if the eye aspect ratio is below the blink
        # threshold, and if so, increment the blink frame counter
        if ear < EYE_AR_THRESH:
            COUNTER += 1
            # if the eyes were closed for a sufficient number of
            # frames, then sound the alarm
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                # if the alarm is not on, turn it on
                if not ALARM_ON:
                    ALARM_ON = True
                    GPIO.output(led2, True)
                    GPIO.output(led, True)
                    # check to see if the TrafficHat buzzer should
                    # be sounded
                    if args["alarm"] > 0:
                        th.buzzer.blink(0.1, 0.1, 10,
                            background=True)
                # draw an alarm on the frame
                print("DROWSY!!!")
                cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        # otherwise, the eye aspect ratio is not below the blink
        # threshold, so reset the counter and alarm
        else:
            COUNTER = 0
            ALARM_ON = False
            GPIO.output(led, False)
        # draw the computed eye aspect ratio on the frame to help
        # with debugging and setting the correct eye aspect ratio
        # thresholds and frame counters
        cv2.putText(frame, "EAR: {:.3f}".format(ear), (300, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
I'm making a smart mirror with OpenCV. I use Raspbian, but I have a problem.
# Import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4,
        minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:,2:] += rects[:,:2]
    return rects

def draw_rects(img, rects, color):
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

# Initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
cascade = cv2.CascadeClassifier("opencv-3.3.0/data/haarcascades/haarcascade_frontalface_alt.xml")

# Allow the camera to warm up
time.sleep(0.1)
scan = 0

# Capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image
    img = frame.array
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)
    rects = detect(gray, cascade)
    if len(rects) != 0:
        scan = 1
    vis = img.copy()
    draw_rects(vis, rects, (0, 255, 0))
    # show the frame
    cv2.imshow("Frame", vis)
    key = cv2.waitKey(1) & 0xFF
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    # if a face has been detected, break from the loop
    if scan == 1:
        break
So, in a virtual environment, I want to bring the variable (scan) into my file (.py), but I get an error: No module named cv2.
How can I use the variable (scan) in my file? Help me, please.
This is my file (.py):
#smartmirror.py
from __future__ import print_function
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from Tkinter import *
import locale
(used coding)
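One way to get the scan flag into smartmirror.py without importing cv2 there (a sketch, assuming the detection script and smartmirror.py run as separate processes and that the file name scan_flag.txt is free to use) is to have the detection script write the flag to a small file that the mirror reads:

# in the face-detection script (runs inside the virtualenv that has cv2):
def write_scan_flag(scan, path="scan_flag.txt"):
    # persist the current detection state so another process can read it
    with open(path, "w") as f:
        f.write(str(scan))

# in smartmirror.py (no cv2 import needed):
def read_scan_flag(path="scan_flag.txt"):
    # returns 1 if a face was detected, 0 otherwise (or if the file is missing)
    try:
        with open(path) as f:
            return int(f.read().strip() or 0)
    except (IOError, ValueError):
        return 0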
I've been following an OpenCV tutorial by Adrian Rosebrock for a home surveillance system. This is working. I also have an analog sensor that uses an ADS1115 analog-to-digital converter. This is also working.
The problem is that once I import the ADS library in the surveillance code, I get an error.
Error:
Traceback (most recent call last):
File "ss_security.py", line 17, in <module>
import Adafruit_ADS1x15
ImportError: No module named Adafruit_ADS1x15
This is the code:
# import the necessary packages
from pyimagesearch.tempimage import TempImage
from picamera.array import PiRGBArray
from picamera import PiCamera
from imutils.video import VideoStream
import warnings
import dropbox
import json
import datetime
import argparse
import imutils
import time
import cv2
import math
# Import the ADS1x15 module.
import Adafruit_ADS1x15
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
help="path to the JSON configuration file")
ap.add_argument("-p", "--picamera", type=int, default=-1,
help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())
# filter warnings, load the configuration and initialize the Dropbox
# client
warnings.filterwarnings("ignore")
conf = json.load(open(args["conf"]))
client = None
# check to see if the Dropbox should be used
if conf["use_dropbox"]:
# connect to dropbox and start the session authorization process
client = dropbox.Dropbox(conf["dropbox_access_token"])
print("[SUCCESS] dropbox account linked")
# initialize the video stream and allow the camera sensor to warm up
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
vs.resolution = tuple(conf["resolution"])
vs.framerate = conf["fps"]
rawCapture = PiRGBArray(vs, size=tuple(conf["resolution"]))
# allow the camera to warmup, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print("[INFO] warming up...")
time.sleep(conf["camera_warmup_time"])
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0
# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=500)
    text = "Unoccupied"
    # draw the timestamp on the frame
    timestamp = datetime.datetime.now()
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)
    # convert it to grayscale, and blur it
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    # if the average frame is None, initialize it
    if avg is None:
        print("[INFO] starting background model...")
        avg = gray.copy().astype("float")
        rawCapture.truncate(0)
        continue
    # accumulate the weighted average between the current frame and
    # previous frames, then compute the difference between the current
    # frame and running average
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    # threshold the delta image, dilate the thresholded image to fill
    # in holes, then find contours on thresholded image
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
        cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < conf["min_area"]:
            continue
        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"
    # draw the text and timestamp on the frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)
    # display temp
    cv2.putText(frame, "Temp: 30 C", (250, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    # check to see if the room is occupied
    if text == "Occupied":
        # check to see if enough time has passed between uploads
        if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
            # increment the motion counter
            motionCounter += 1
            # check to see if the number of frames with consistent motion is
            # high enough
            if motionCounter >= conf["min_motion_frames"]:
                # check to see if dropbox should be used
                if conf["use_dropbox"]:
                    # write the image to temporary file
                    t = TempImage()
                    cv2.imwrite(t.path, frame)
                    # upload the image to Dropbox and cleanup the temporary image
                    print("[UPLOAD] {}".format(ts))
                    path = "/{base_path}/{timestamp}.jpg".format(
                        base_path=conf["dropbox_base_path"], timestamp=ts)
                    client.files_upload(open(t.path, "rb").read(), path)
                    t.cleanup()
                # update the last uploaded timestamp and reset the motion
                # counter
                lastUploaded = timestamp
                motionCounter = 0
    # otherwise, the room is not occupied
    else:
        motionCounter = 0
    # check to see if the frames should be displayed to screen
    if conf["show_video"]:
        # display the security feed
        cv2.imshow("Security Feed", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key is pressed, break from the loop
        if key == ord("q"):
            break
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
I have just imported the library; I haven't done any computations/readings yet. As you can see, I placed a default temp value.
The library is located in pi/Adafruit_Python_ADS1x15.
Steps taken:
Installed the ADS library inside the cv environment (Error: destination path already exists)
Tried from Adafruit_Python_ADS1x15 import Adafruit_ADS1x15 (Error: No module named Adafruit_Python_ADS1x15)
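Since this is an ImportError, the most likely cause is that Adafruit_ADS1x15 is not installed into the cv virtualenv that the surveillance script runs in (installing it there with pip while that virtualenv is active is the cleaner fix). As a quick workaround, here is a sketch that points sys.path at the checkout, assuming the repository really lives at /home/pi/Adafruit_Python_ADS1x15:

import sys

# adjust this to wherever the Adafruit_Python_ADS1x15 checkout actually lives
sys.path.append("/home/pi/Adafruit_Python_ADS1x15")

import Adafruit_ADS1x15

# single-ended read of channel 0; gain=1 covers roughly +/-4.096 V
adc = Adafruit_ADS1x15.ADS1115()
value = adc.read_adc(0, gain=1)
print(value)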