I used webdis in docker, with a python file in the backend:
# Publish one test message to the local Redis instance (the same one that
# webdis, running in Docker, is fronting) on channel "mychannel".
import redis
r = redis.Redis(host='localhost', port=6379, db=0)
value = "tests"
r.publish('mychannel', value)
And a javascript frontend that uses a websocket
// Timestamps bracketing the benchmark run ("start" .. "end" messages).
var time1 = null
var time2 = null
// Connect to the webdis WebSocket endpoint.
var socket = new WebSocket('ws://localhost:8765/ws');
socket.onmessage = function(e) {
var server_message = e.data;
// First sentinel: start the clock.
if (server_message == "start") {
time1 = new Date().valueOf()
}
// Last sentinel: stop the clock, report, and restart the page.
if (server_message == "end") {
time2 = new Date().valueOf()
console.log("100000 messages took ", (time2 - time1) / 1000, "seconds")
window.location.reload()
}
// Show the latest message in the page.
document.getElementById("test").innerText = server_message
}
The Python script runs fine and the JavaScript can connect to the websocket, but it's not doing anything.
I am working on a ReactJS frontend with a Python API and OpenCV, which, after a photo is uploaded, should return the result with a green rectangle around the face. However, on clicking "upload photo" it returns 422 (Unprocessable Entity). I have three main parts: Upload.js for the frontend uploading part, main.py for the image API, and face_detector.py for the OpenCV part.
Upload.js
import React, { useState } from 'react'
import './Upload.css'
import axios from 'axios';
// Upload widget: lets the user pick an image, previews it, and POSTs it
// to the FastAPI backend at /images.
const Upload = () => {
const [file, setFile] = useState();
// NOTE(review): this stores the object-URL *string* used for the preview,
// not the File object itself — so the FormData below carries a URL string,
// which is what makes the server respond 422 (the fix, per the answer
// further down, is setFile(event.target.files[0])).
const handleChange = (event) => {
setFile(URL.createObjectURL(event.target.files[0]))
}
const submitForm = () => {
const formData = new FormData();
formData.append('file', file);
axios
.post('http://127.0.0.1:8000/images', formData, {
headers: {
accept: 'multipart/form-data',
}
})
.then(() => {
alert('file upload succcess');
})
.catch(() => alert("File Upload Error"))
// Returned for convenience; the click handler ignores this value.
return formData
}
return (
<>
<input className='img_choose' type="file" onChange={handleChange} />
<img src={file} className='prev_img' alt='img' />
<button className='btn_upload' onClick={submitForm}>Upload</button>
</>
);
}
export default Upload
main.py
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import Response
from random import randint
from starlette.requests import Request
import uuid
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI()

# In-memory "database": raw bytes of every uploaded image.
db = []

# Browser origins allowed to call this API (CORS).
origins = [
    "http://localhost:3000",
    "http://127.0.0.1:8000/"
]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.get("/main")  # '@' restored — the decorator was mangled to '#app.get'
def main():
    """Simple landing/health endpoint."""
    return {"message": "Welcome"}


@app.post("/images/")  # '@' restored — was '#app.post'
async def create_upload_file(file: UploadFile = File(...)):
    """Store the uploaded image bytes in memory and return its new name."""
    file.filename = f"{uuid.uuid4()}.jpg"
    contents = await file.read()  # <-- Important!
    db.append(contents)
    return {"filename": file.filename}


@app.get("/images/")  # '@' restored — was '#app.get'
async def read_random_file():
    """Serve the raw bytes of a randomly chosen stored image."""
    # get a random file from the image db (raises ValueError if db is empty)
    random_index = randint(0, len(db) - 1)
    response = Response(content=db[random_index])
    return response
Face_detector.py
import cv2
import urllib.request
import numpy as np
# Endpoint(s) that serve the uploaded images.
url = [
    "http://127.0.0.1:8000/images/"
]


def url_to_image(url):
    """Download *url* and decode the response body into an OpenCV BGR image."""
    with urllib.request.urlopen(url) as resp:
        data = resp.read()
    buf = np.asarray(bytearray(data), dtype="uint8")
    # cv2.imdecode returns None if the bytes are not a decodable image.
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)
# Detect faces in each image served by the API and draw green rectangles.
# Fixes vs. original: the cascade is loaded once instead of per iteration,
# the useless `x = y = w = h = int` (which bound the *type* object) is gone,
# and the loop variable no longer shadows the `url` list.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
for image_url in url:
    image = url_to_image(image_url)
    face_coordinates = face_cascade.detectMultiScale(
        image,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE)
    for (x, y, w, h) in face_coordinates:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow("Image", image)
    cv2.waitKey(0)
This is where you are going wrong:
setFile(URL.createObjectURL(event.target.files[0]))
You are attaching the file URL in the formData instead of the file.
use this instead
setFile(event.target.files[0])
I want to classify a video using opencv methods by using flask, the video to be classified is live-stream that is from the user.
I googled a lot, then finally I found some code and did this:-
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
import time
import io
from PIL import Image
import base64,cv2
import numpy as np
# import pyshine as ps
from flask_cors import CORS,cross_origin
import imutils
# import dlib
from engineio.payload import Payload
# detector = dlib.get_frontal_face_detector()
# predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
# Allow large bursts of Engine.IO packets per polling cycle (many frames).
Payload.max_decode_packets = 2048
app = Flask(__name__)
# Accept Socket.IO connections from any origin (development setting).
socketio = SocketIO(app,cors_allowed_origins='*' )
CORS(app)
@app.route('/', methods=['POST', 'GET'])  # '@' restored — was '#app.route'
def index():
    """Serve the camera/preview page."""
    return render_template('index.html')
def readb64(base64_string):
    """Decode a data-URL (or bare) base64 string into a BGR OpenCV image."""
    # Strip everything up to and including the 'base64,' marker, if present.
    marker = base64_string.find('base64,')
    payload = base64_string[marker + 7:]
    buffer = io.BytesIO()
    buffer.write(base64.b64decode(payload, ' /'))
    decoded = Image.open(buffer)
    # PIL gives RGB; OpenCV wants BGR.
    return cv2.cvtColor(np.array(decoded), cv2.COLOR_RGB2BGR)
def moving_average(x):
    """Arithmetic mean of the collected FPS samples."""
    return np.asarray(x).mean()
@socketio.on('catch-frame')  # '@' restored — decorator was mangled to '#'
def catch_frame(data):
    """Echo a client frame straight back on 'response_back'."""
    emit('response_back', data)
# Module-level FPS bookkeeping shared by the 'image' handler.
# (A 'global' statement at module scope is a no-op; kept as in the original.)
global fps,prev_recv_time,cnt,fps_array
fps=30
prev_recv_time = 0
cnt=0
fps_array=[0]
@socketio.on('image')  # '@' restored — decorator was mangled to '#'
def image(data_image):
    """Receive one base64 frame, convert it to grayscale, and send it back.

    Also maintains a 30-sample moving-average FPS estimate in the module
    globals (fps, cnt, prev_recv_time, fps_array).
    """
    global fps, cnt, prev_recv_time, fps_array
    recv_time = time.time()
    text = 'FPS: ' + str(fps)  # only used if the putBText line is re-enabled
    frame = readb64(data_image)
    # frame = changeLipstick(frame,[255,0,0])
    # frame = ps.putBText(frame,text,text_offset_x=20,text_offset_y=30,vspace=20,hspace=10, font_scale=1.0,background_RGB=(10,20,222),text_RGB=(255,255,255))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    imgencode = cv2.imencode('.jpeg', frame, [cv2.IMWRITE_JPEG_QUALITY, 40])[1]

    # base64-encode and wrap as a data URL the <img> tag can display.
    stringData = base64.b64encode(imgencode).decode('utf-8')
    b64_src = 'data:image/jpeg;base64,'
    stringData = b64_src + stringData

    # emit the frame back
    emit('response_back', stringData)

    # Update the smoothed FPS estimate; reset the window every 30 frames.
    fps = 1 / (recv_time - prev_recv_time)
    fps_array.append(fps)
    fps = round(moving_average(np.array(fps_array)), 1)
    prev_recv_time = recv_time
    cnt += 1
    if cnt == 30:
        fps_array = [fps]
        cnt = 0
def getMaskOfLips(img, points):
    """Return a mask the same shape as *img* with the lip polygon filled
    with white pixels and everything else black.
    """
    blank = np.zeros_like(img)
    return cv2.fillPoly(blank, [points], (255, 255, 255))
def changeLipstick(img, value):
    """Recolor the lips of every detected face in *img*.

    img   -- BGR image (OpenCV convention).
    value -- lipstick color as an [R, G, B] sequence.

    Returns the blended image, or the original image if no face is found.

    NOTE(review): `detector` and `predictor` are the dlib globals commented
    out at the top of this file — calling this function as-is raises
    NameError (its call site inside image() is also commented out).
    """
    img = cv2.resize(img, (0, 0), None, 1, 1)  # scale 1x1: effectively a copy
    imgOriginal = img.copy()
    imgColorLips = imgOriginal  # fallback result when no face is detected
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector(imgGray)
    for face in faces:
        x1, y1 = face.left(), face.top()      # bounding box — computed but unused
        x2, y2 = face.right(), face.bottom()
        facial_landmarks = predictor(imgGray, face)
        points = []
        # Collect all 68 landmark points for this face.
        for i in range(68):
            x = facial_landmarks.part(i).x
            y = facial_landmarks.part(i).y
            points.append([x, y])
        points = np.array(points)
        # Indices 48..60 of the 68-point model cover the lip region.
        imgLips = getMaskOfLips(img, points[48:61])
        imgColorLips = np.zeros_like(imgLips)
        imgColorLips[:] = value[2], value[1], value[0]  # RGB -> BGR
        imgColorLips = cv2.bitwise_and(imgLips, imgColorLips)
        # NOTE(review): `value` is unconditionally overwritten here:
        # 1 // 10 == 0, the even check bumps it to 1, so kernel_size is
        # always (7, 7); `weight` is likewise always 0.4025.
        value = 1
        value = value // 10
        if value % 2 == 0:
            value += 1
        kernel_size = (6 + value, 6 + value)  # +1 is to avoid 0
        weight = 1
        weight = 0.4 + (weight) / 400
        imgColorLips = cv2.GaussianBlur(imgColorLips, kernel_size, 10)
        imgColorLips = cv2.addWeighted(imgOriginal, 1, imgColorLips, weight, 0)
    return imgColorLips
if __name__ == '__main__':
    # Run the Socket.IO development server.
    socketio.run(app,port=9990 ,debug=True)
This is in main.py of flask
In the templates folder I have made index.html with
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Title</title>
<style>
#video {
transform: rotateY(180deg);
-webkit-transform:rotateY(180deg); /* Safari and Chrome */
-moz-transform:rotateY(180deg); /* Firefox */
}
</style>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
<!-- NOTE(review): socket.io client 2.0.0 is very old; the answer below
     attributes the 400 BAD REQUEST to this client/server version
     mismatch. -->
<script src='https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.0.0/socket.io.js'></script>
</head>
<body>
<div id="container">
<video autoplay playsinline id="videoElement"></video>
<canvas id="canvas" width="400" height="300"></canvas>
</div>
<div class = 'video'>
<img id="photo" width="400" height="300">
<h1>video</h1>
</div>
<script type="text/javascript" charset="utf-8">
// Open a Socket.IO connection back to the page's own host and port.
var socket = io.connect(window.location.protocol + '//' + document.domain + ':' + location.port);
socket.on('connect', function(){
console.log("Connected...!", socket.connected)
});
var canvas = document.getElementById('canvas');
var context = canvas.getContext('2d');
const video = document.querySelector("#videoElement");
video.width = 400;
video.height = 300;
// NOTE(review): getUserMedia only exists in a secure context — per the
// answer below, on hosts other than localhost the page must be served over
// HTTPS, otherwise navigator.mediaDevices.getUserMedia is undefined.
if (navigator.mediaDevices.getUserMedia) {
navigator.mediaDevices.getUserMedia({ video: true })
.then(function (stream) {
video.srcObject = stream;
video.play();
})
.catch(function (err0r) {
// errors (e.g. permission denied) are silently ignored here
});
}
// Capture a frame from the <video> ~6 times per second, encode it as a
// JPEG data URL, and ship it to the server on the 'image' event.
const FPS = 6;
setInterval(() => {
width=video.width;
height=video.height;
context.drawImage(video, 0, 0, width , height );
var data = canvas.toDataURL('image/jpeg', 0.5);
context.clearRect(0, 0, width,height );
socket.emit('image', data);
}, 1000/FPS);
// Display whatever processed frame the server sends back.
socket.on('response_back', function(image){
photo.setAttribute('src', image );
});
</script>
</body>
</html>
The problem is: I need to make sure that it works on the user's device, so when I change socketio.run() and add host='0.0.0.0' It stops having access to my webcam
How can I solve this?
also I am unable to show back the video :(
The Error in Console
polling-xhr.js:264 GET http://localhost:9990/socket.io/?EIO=3&transport=polling&t=N_GnQuZ 400 (BAD REQUEST)
Hey,
First you are using a very old socketio client version.
https://cdn.socket.io/4.4.1/socket.io.min.js
Use a newer version of the Socket.IO client; that old version is the reason for the 400 error. If you look at the preview of the request in dev tools (Chrome), you will notice an "unsupported version" message.
Why don't you see the webcam? Because you have no secure context: when you move away from localhost (e.g. to 0.0.0.0), Chrome-based browsers require a secure context, which means you need HTTPS — otherwise "navigator.mediaDevices.getUserMedia" will be undefined.
See this post
I am trying to send data from a simple Python program to a Node server, but with no success. That's why I am asking for help.
My simple python :
import requests
# Node server route that should receive the posted data.
SIGNUP_URL = 'http://localhost:8000/timer'

def submit_form():
    # NOTE(review): `name` is a bare, undefined identifier used as a dict
    # key — this raises NameError; the answer below quotes it as
    # {'name': 'whateever'}.
    obj = {name:'whateever'}
    resp = requests.post(SIGNUP_URL, data = obj)

if __name__ == '__main__':
    submit_form()
my nodejs (light, I remove not concern lines) :
// Express + Socket.IO server: receives a POST from the Python script on
// /timer and relays the payload to connected browser clients.
var http = require('http');
var express = require('express');
var app = express();
app.get('/', function (request, response) {
response.sendFile(__dirname + '/public/index.html');
});
var server = http.createServer(app);
var io = require('socket.io')(server);
const bodyParser = require('body-parser');
const path = require('path');
const {execFile, exec, spawn} = require ('child_process');
app.use(express.static('public'));
// Needed so req.body is populated for form-encoded POSTs.
app.use(bodyParser.urlencoded({ extended: true }));
app.post('/timer', function(req, res){
res.sendFile(__dirname + '/public/status.html');
var test = "test";
var data = req.body;
var info = data.name;
io.emit('messageFromServer', { info });
// NOTE(review): this logs the literal string 'info', not the variable —
// the answer below changes it to console.log(info).
console.log('info')
});
server.listen(8000, console.log("listening to port 8000"));
So, when I execute my Python script I want to transfer the data "name : whatever" to the server; then I want the server to write the data to the console (to be sure the data was sent correctly), and if all is OK, emit this data to my HTML page...
Thanks for helping me.
The answer :
python code :
import requests

# Node server route that relays the posted data to the browser.
SIGNUP_URL = 'http://localhost:8000/timer'


def submit_form():
    """POST a small form-encoded payload to the /timer endpoint."""
    payload = {'name': 'whateever'}
    response = requests.post(SIGNUP_URL, data=payload)


if __name__ == '__main__':
    submit_form()
nodejs code :
// Relay the posted form body to every connected Socket.IO client and log it.
app.post('/timer', (req, res) => {
    res.sendFile(__dirname + '/public/status.html');
    const info = req.body;
    io.emit('messageFromServer', info);
    console.log(info);
});
It works.
I'm using a WiFi cam called ESP32-CAM and am trying to connect to it with OpenCV. The Node server.js code makes it possible to connect to the module by its IP address. The code for server.js is given below:
SERVER.JS:
const path = require('path');
const express = require('express');
const WebSocket = require('ws');
const app = express();
const WS_PORT = 8888;
const HTTP_PORT = 8000;

// Raw WebSocket relay: every frame received from any client (the ESP32-CAM)
// is broadcast to all currently open clients (the browser viewers).
const wsServer = new WebSocket.Server({port: WS_PORT}, ()=> console.log(`WS Server is listening at
${WS_PORT}`));
let connectedClients = [];
wsServer.on('connection', (ws, req)=>{
console.log('Connected');
connectedClients.push(ws);
ws.on('message', data => {
// Prune closed sockets first, then broadcast. The original spliced
// inside the forEach, which skips the element after each removal and
// leaves stale entries in the list.
connectedClients = connectedClients.filter(client => client.readyState === client.OPEN);
connectedClients.forEach(client => client.send(data));
});
});
// Serve the viewer page.
app.get('/client',(req,res)=>res.sendFile(path.resolve(__dirname, './client.html')));
app.listen(HTTP_PORT, ()=> console.log(`HTTP server listening at ${HTTP_PORT}`));
CLÄ°ENT.HTML:
<html>
<head>
<title>Client</title>
</head>
<body>
<img src="">
<script>
// Viewer page: renders each binary WebSocket frame from the relay server
// as the <img> source.
const img = document.querySelector('img');
// NOTE(review): three slashes after "ws:" — presumably meant to be
// 'ws://192.168.1.33:8888'; verify (some browsers normalise this).
const WS_URL = 'ws:///192.168.1.33:8888';
const ws = new WebSocket(WS_URL);
let urlObject;
ws.onopen = () => console.log(`Connected to ${WS_URL}`);
ws.onmessage = message => {
const arrayBuffer = message.data;
// Revoke the previous blob URL so successive frames don't leak memory.
if(urlObject){
URL.revokeObjectURL(urlObject);
}
urlObject = URL.createObjectURL(new Blob([arrayBuffer]));
img.src = urlObject;
}
</script>
</body>
I can access the streaming video on the address "http://192.168.1.33:8000/client" with my browser but not on opencv. Here's the code in OpenCv python trying to reach the camera.
OPENCV CODE:
# NOTE(review): this URL is the HTML *viewer page*, not a video stream —
# cv2.VideoCapture cannot decode an HTML document, hence isOpened() is False.
cap = cv2.VideoCapture('http://192.168.1.33:8000/client')
print(cap.isOpened())  # it prints FALSE ('//' is not a Python comment; fixed)
while True:
    ret, frame = cap.read()
    cv2.imshow('framee', frame)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
I don't understand why I can't access the camera, since this ESP32 module is just like an IP camera. I would appreciate any help.
Thank you.