I'm trying to load an image with OPENCV from an io.BytesIO() structure.
Originally, the code loads the image with PIL, like below:
# Buffer the raw bytes received over the connection in memory.
image_stream = io.BytesIO()
image_stream.write(connection.read(image_len))
image_stream.seek(0)
# PIL accepts any file-like object directly.
image = Image.open(image_stream)
print('Image is %dx%d' % image.size)
I tried to open with OPENCV like that:
image_stream = io.BytesIO()
image_stream.write(connection.read(image_len))
image_stream.seek(0)
# NOTE(review): this is the failing attempt — cv2.imread() accepts only a
# filesystem path (a string), never a file-like object such as BytesIO.
img = cv2.imread(image_stream,0)
cv2.imshow('image',img)
But it seems that imread doesn't deal with BytesIO(). I'm getting an error.
I'm using OPENCV 3.3 and Python 2.7. Please, could someone help me?
Henrique
Try this:
import numpy as np
import cv2 as cv
import io

# Collect the encoded image bytes in an in-memory buffer.
image_stream = io.BytesIO()
image_stream.write(connection.read(image_len))
image_stream.seek(0)

# cv.imdecode() wants a flat uint8 array holding the raw file contents,
# so wrap the buffered bytes before decoding.
raw = image_stream.read()
file_bytes = np.asarray(bytearray(raw), dtype=np.uint8)
img = cv.imdecode(file_bytes, cv.IMREAD_COLOR)
The answer delivered by arrybn worked for me. It was only necessary to add a cv2.waitKey(1) call after cv2.imshow. Here is the code:
Server Side:
import io
import socket
import struct
import cv2
import numpy as np

# Accept a single client and wrap the connection in a buffered file object
# so we can read exact byte counts.
server_socket = socket.socket()
server_socket.bind(('0.0.0.0', 8000))
server_socket.listen(0)
connection = server_socket.accept()[0].makefile('rb')

cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
try:
    header_size = struct.calcsize('<L')
    while True:
        # Each frame is prefixed by its length as a little-endian unsigned
        # 32-bit integer; a zero length ends the stream.
        header = connection.read(header_size)
        if len(header) < header_size:
            break  # peer closed mid-header; struct.unpack would raise
        image_len = struct.unpack('<L', header)[0]
        if not image_len:
            break
        image_stream = io.BytesIO()
        image_stream.write(connection.read(image_len))
        image_stream.seek(0)
        # Decode the in-memory JPEG/PNG into a BGR image.
        file_bytes = np.asarray(bytearray(image_stream.read()), dtype=np.uint8)
        img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
        if img is None:
            continue  # corrupt/incomplete frame; skip instead of crashing imshow
        cv2.imshow("Image", img)
        # waitKey is required so HighGUI actually processes window events.
        cv2.waitKey(1)
finally:
    connection.close()
    server_socket.close()
    cv2.destroyAllWindows()
Based on the example Capturing to a network stream
Related
I have a stereo camera connected to a Raspberry Pi. I want to get images off it, compress them, and send them to a PC continuously. I am trying to use MMAL for this.
I have this code to test it out:
from picamera import mmal, mmalobj
import cv2
import io
from threading import Event

# Signalled by the encoder callback once a complete JPEG frame has been
# emitted for the current input buffer.
finished = Event()

# Load a test image and convert it to a single-plane grayscale layout.
# NOTE(review): the input port below is committed as I420; feeding a plain
# grayscale ndarray assumes the luma plane is enough — confirm with the
# picamera mmalobj docs.
image = cv2.imread('image.png')
image = cv2.resize(image, (3200, 1300))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Stand-alone MMAL image encoder: raw I420 frames in, JPEG frames out.
encoder = mmalobj.MMALImageEncoder()
encoder.inputs[0].format = mmal.MMAL_ENCODING_I420
encoder.inputs[0].framesize = (3200, 1300)
encoder.inputs[0].commit()
# The output port inherits the input geometry, then switches to JPEG.
encoder.outputs[0].copy_from(encoder.inputs[0])
encoder.outputs[0].format = mmal.MMAL_ENCODING_JPEG
encoder.outputs[0].commit()
encoder.outputs[0].params[mmal.MMAL_PARAMETER_JPEG_Q_FACTOR] = 90


def image_callback(port, buf):
    # Accumulate compressed output; FRAME_END marks the last buffer of
    # the current JPEG, at which point the main loop is released.
    jpg_data.write(buf.data)
    if bool(buf.flags & mmal.MMAL_BUFFER_HEADER_FLAG_FRAME_END):
        finished.set()
        return True
    return False


# perform compression on the image 100 times
for i in range(100):
    finished.clear()
    # NOTE(review): the ports are re-enabled on every iteration; the author
    # reports this is required for repeat encodes but costs about half the
    # total run time.
    encoder.inputs[0].enable(lambda port, buf: True)
    encoder.outputs[0].enable(image_callback)
    jpg_data = io.BytesIO()
    buf = encoder.inputs[0].get_buffer()
    buf.data = image[:]
    encoder.inputs[0].send_buffer(buf)
    finished.wait()
    jpg_data.close()
    encoder.inputs[0].disable()
    encoder.outputs[0].disable()
This only works if I disable and reenable the ports on every iteration. However, it is slower than just using OpenCV:
import cv2
import io

# Load the test image; cv2.imread() returns None (not an exception) when
# the file is missing or unreadable, which the original left unchecked and
# would crash obscurely inside resize().
image = cv2.imread('image.png')
if image is None:
    raise IOError("could not read 'image.png'")
image = cv2.resize(image, (3200, 1300))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# perform compression on the image 100 times
for i in range(100):
    success, buffer = cv2.imencode(".jpg", image)
    if not success:
        raise RuntimeError("JPEG encoding failed")
    # The encoded ndarray supports the buffer protocol, so BytesIO wraps it
    # directly; read() then yields the raw JPEG bytes.
    io_buffer = io.BytesIO(buffer)
    data = io_buffer.read()
Also, I found that the disabling and reenabling was taking up about half the time. Does anyone know why this is happening, and if there is a faster way? Thanks.
I want to send an image from one server to another. I don't want to save the file on disk. I directly want to read the send data. I've written a script for this.
server.py
import socket
import cv2

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 6002))
s.listen(10)

c, addr = s.accept()
print('{} connected.'.format(addr))

img = cv2.imread("test.jpg")
# tobytes() is the current name; tostring() is a deprecated alias.
img = cv2.imencode('.jpg', img)[1].tobytes()

# 16-byte fixed-width ASCII header carrying the payload length so the
# receiver knows exactly how many bytes to expect. encode() keeps this
# correct when str is unicode (Python 3).
c.sendall(str(len(img)).ljust(16).encode())
# Bug fix: the original sent the whole image once, then sent it AGAIN in
# 1024-byte chunks, so the client received the payload twice. sendall()
# also loops internally, unlike send(), which may write only part of the
# buffer.
c.sendall(img)
print("Done sending...")

c.close()
s.close()
client.py
import socket
import cv2
import numpy as np

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("", 6002))

# The server prefixes the payload with a 16-byte ASCII length header;
# consume it first so header bytes don't pollute the image data.
length = int(s.recv(16).strip())

# Bug fix: the original accumulated str(datas)[2:-1] — the textual repr of
# each bytes chunk — which mangles the binary stream and is why imdecode()
# produced an empty matrix. Accumulate raw bytes instead.
img = b""
while len(img) < length:
    datas = s.recv(1024)
    if not datas:
        break  # server closed early
    img += datas
print("Done receiving")

# frombuffer() replaces the deprecated fromstring() for binary input.
img_np = np.frombuffer(img, np.uint8)
img_np = cv2.imdecode(img_np, cv2.IMREAD_COLOR)

s.close()
I'm receiving image in img but when decoding it using opencv, I'm getting empty matrix img_np.
I suggest you to use Memory-Mapped File instead of ZeroMQ as it yields less latency.
Here is an example I wrote that you can use: https://github.com/off99555/python-mmap-ipc
import cv2
import urllib
import numpy as np

# Connect to the MJPEG stream (Python 2 urllib API, matching the rest of
# this snippet).
stream = urllib.urlopen('http://192.168.1.5:8080/frame.mjpg')

stream_buf = ''  # renamed from "bytes", which shadowed the builtin
while True:
    stream_buf += stream.read(1024)
    # A complete JPEG lies between the SOI (0xFFD8) and EOI (0xFFD9) markers.
    a = stream_buf.find('\xff\xd8')
    b = stream_buf.find('\xff\xd9')
    if a != -1 and b != -1:
        jpg = stream_buf[a:b + 2]
        stream_buf = stream_buf[b + 2:]
        # Bug fix: cv2.CV_LOAD_IMAGE_COLOR was removed in OpenCV 3 (the
        # asker runs 3.3) — the modern constant is cv2.IMREAD_COLOR.
        # np.frombuffer replaces the deprecated np.fromstring.
        i = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
        cv2.imshow('i', i)
        # Exit on ESC (key code 27).
        if cv2.waitKey(1) == 27:
            exit(0)
The code is not throwing any error either; I have set No authentication in IPWebcam.
Its throwing error Connection Refused
try this:
import urllib
import cv2
import numpy as np
import time

# Replace the URL with your own IPwebcam shot.jpg IP:port
url = 'http://192.168.20.108:8080/shot.jpg'

while True:
    # Fetch a single still frame from the IP camera over HTTP.
    response = urllib.urlopen(url)
    # Wrap the raw JPEG bytes in a flat uint8 array for OpenCV.
    encoded = np.array(bytearray(response.read()), dtype=np.uint8)
    # -1 keeps the image exactly as encoded (unchanged channels/depth).
    frame = cv2.imdecode(encoded, -1)
    # Display the decoded frame.
    cv2.imshow('IPWebcam', frame)
    # Optional throttle to lighten CPU load:
    # time.sleep(0.1)
    # Quit when the user presses 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
I have written a code which works fine for a recorded video or video from webcam of my laptop.. but i need it to work with video directly from my phone.. Now i have been able to get video from phone and show it through python. But i need frames of this video to apply OpticalFlow function on it. (calcOpticalFlowFarneback).. Here's my two codes. i'll be thankful for your help.
This is code for getting video from android phone's camera to python
import cv2
import urllib2
import numpy as np
import sys

# Default host:port of the phone's IP-camera app; overridable via the
# first command-line argument.
host = "192.168.1.2:8080"
if len(sys.argv)>1:
    host = sys.argv[1]

hoststr = 'http://' + host + '/video'
print 'Streaming ' + hoststr

stream=urllib2.urlopen(hoststr)

bytes=''
while True:
    # Append the next chunk of the MJPEG stream to the buffer.
    bytes+=stream.read(1024)
    # A complete JPEG lies between SOI (0xFFD8) and EOI (0xFFD9) markers.
    a = bytes.find('\xff\xd8')
    b = bytes.find('\xff\xd9')
    if a!=-1 and b!=-1:
        # Cut the frame out and keep the remainder for the next iteration.
        jpg = bytes[a:b+2]
        bytes= bytes[b+2:]
        # Decode the extracted JPEG into a BGR frame.
        i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)
        cv2.imshow(hoststr,i)
        # Exit on ESC (key code 27).
        if cv2.waitKey(1) ==27:
            exit(0)
And this one is relevant portion for motion flow, notice i need frames from video
# Open the pre-recorded video and grab the first frame to seed the
# "previous frame" needed by the optical-flow computation.
vid=cv2.VideoCapture('vidaflv.flv')
ret, frame = vid.read()
imgrayp = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
while True:
    ret, frame = vid.read()
    if ret:
        imgray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
        # Dense Farneback flow between the previous and current grayscale
        # frames (args: pyr_scale, levels, winsize, iterations, poly_n,
        # poly_sigma, flags).
        flow = cv2.calcOpticalFlowFarneback(imgrayp,imgray,None,0.5,3,15,3,5,1.2,0)
        # NOTE(review): draw_flow() is defined elsewhere — not shown here.
        cv2.imshow('Optical flow',draw_flow(frame,flow))
        # Current frame becomes the previous frame for the next iteration.
        imgrayp=imgray
    if cv2.waitKey(1)==ord('e'):
        break
what i can't figure out is how to get a frame from live stream video code to put in my optical flow fuction..
Without being able to test this, I'd try something like:
import cv2
import urllib2
import numpy as np
import sys
host = "192.168.1.2:8080"
if len(sys.argv)>1:
host = sys.argv[1]
hoststr = 'http://' + host + '/video'
print 'Streaming ' + hoststr
stream=urllib2.urlopen(hoststr)
bytes=''
FirstTime=True
while True:
bytes+=stream.read(1024)
a = bytes.find('\xff\xd8')
b = bytes.find('\xff\xd9')
if a!=-1 and b!=-1:
jpg = bytes[a:b+2]
bytes= bytes[b+2:]
i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)
if FirstTime=True:
FirstTime=False
imgrayp = cv2.cvtColor(i,cv2.COLOR_BGR2GRAY)
imgray = cv2.cvtColor(i,cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(imgrayp,imgray,None,0.5,3,15,3,5,1.2,0)
cv2.imshow('Optical flow',draw_flow(frame,flow))
if cv2.waitKey(1) ==27:
exit(0)
I am working with opencv and have two video source. I am using the following code. The code works sometimes and sometimes it does not work. Is there a problem with my code. How can I make the amends...
import cv2

# Open both capture devices up front and prime each with one read.
Channel0 = cv2.VideoCapture(0)
IsOpen0, Image0 = Channel0.read()
Channel1 = cv2.VideoCapture(1)
IsOpen1, Image1 = Channel1.read()

while IsOpen0 and IsOpen1:
    IsOpen0, Image0 = Channel0.read()
    IsOpen1, Image1 = Channel1.read()
    # Bug fix: the original called imshow unconditionally after the reads,
    # so a failed read (frame is None) crashed imshow before the loop
    # condition was rechecked — the likely "sometimes it does not work".
    if not (IsOpen0 and IsOpen1):
        break
    cv2.imshow("Webcamera", Image0)
    cv2.imshow("Panasonic", Image1)
    cv2.waitKey(10)

# Release the devices and windows on exit.
Channel0.release()
Channel1.release()
cv2.destroyAllWindows()
PS It always works when I use only one video source.
I think I figured out my error. For some reason the following code works. It must have been a problem with threading...
import thread
import time
import cv2
def Webcamera():
Channel0 = cv2.VideoCapture(0)
IsOpen0, Image0 = Channel0.read()
while IsOpen0:
IsOpen0, Image0 = Channel0.read()
cv2.imshow("Webcamera",Image0)
cv2.waitKey(10)
if not IsOpen0:
time.delay(0.5)
print "Error opening Web camera"
def Panasonic():
Channel1 = cv2.VideoCapture(1)
IsOpen1, Image1 = Channel1.read()
while IsOpen1:
IsOpen1, Image1 = Channel1.read()
cv2.imshow("Panasonic",Image1)
cv2.waitKey(10)
if not IsOpen1:
time.sleep(0.5)
print "Error opening Panasonic"
try:
thread.start_new_thread(Webcamera,())
thread.start_new_thread(Panasonic,())
except:
print "Error: unable to start thread"
while 1:
pass