reading an opencv image in python through a socket - python

I am trying to read an OpenCV image, sent from C++, through a socket in Python.
I am able to read the image into another C++ program or a VB program and rebuild the image, but with Python I don't understand what's happening.
The sending code where I send the mat.data:
char *start_s = "<S><size>43434234<cols>64<rows>64<SE>";//plus I send the image size, cols, rows, which varies, not like the static char string shown
char *end_e = "<E>";
cv::Mat image_send = some_mat;
iResult = send( ConnectSocket, start_s, (int)strlen(start_s), 0 );
iResult = send( ConnectSocket, (const char *) image_send.data, i_buffer_size, 0 );
iResult = send( ConnectSocket, end_e, (int)strlen(end_e), 0 );
This is what I have tried in Python, but without any success yet. The image_cols and image_rows values are parsed out of the header (not shown here), and only the image_mat.data from the C++ Mat remains in the socket data that I am trying to turn into an image:
data = conn.recv(4757560)
if (i_Read_Image == 2) & (image_cols != 0) & (image_rows != 0):
    print("Entering")
    #print(data)
    data2 = np.fromstring(data, dtype='uint8')
    img_np = cv2.imdecode(data2, cv2.IMREAD_COLOR)
    cv2.imshow('image', img_np)
    cv2.waitKey(0)
    #Also tried this
    #img = Image.new('RGB', (image_cols, image_rows))
    #img.putdata(data)
    #img5 = np.reshape(data2, (image_rows, image_cols))
    i_Read_Image = 0

With the help of the comments I was able to get a working answer. The original image arrives as a single flat RGB array; it needs to be reshaped and placed into an 'RGB' image, which can be done in one line:
img = Image.fromarray(data2.reshape(image_rows,image_cols,3), 'RGB')
and when reading an OpenCV data array from a socket, this works:
data = conn.recv(567667)
if (i_Read_Image == 2) & (image_cols != 0) & (image_rows != 0):
    data2 = np.fromstring(data, dtype='uint8')
    img = Image.fromarray(data2.reshape(image_rows, image_cols, 3), 'RGB')
    img.show()
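One caveat worth adding: a single conn.recv() call is not guaranteed to return the whole frame over TCP, so on larger images the reshape can fail because fewer than image_rows * image_cols * 3 bytes have arrived. A minimal sketch of accumulating the full payload first, assuming the byte count is derived from the header as above (the helper name recv_exact is an illustration, not part of the original code):

import numpy as np
from PIL import Image

def recv_exact(conn, n_bytes):
    # keep calling recv() until exactly n_bytes have been read
    chunks = []
    remaining = n_bytes
    while remaining > 0:
        chunk = conn.recv(remaining)
        if not chunk:  # connection closed before the full image arrived
            raise ConnectionError("socket closed mid-frame")
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)

# image_rows and image_cols are parsed from the "<cols>..<rows>.." header as before
payload = recv_exact(conn, image_rows * image_cols * 3)
data2 = np.frombuffer(payload, dtype=np.uint8)
img = Image.fromarray(data2.reshape(image_rows, image_cols, 3), 'RGB')
img.show()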

Related

Python to Arduino through Serial, can’t move servos

I'm currently trying to have a camera, mounted on a pan-tilt made of two micro servos, track a face. My Python code has been working and has been successfully identifying a face, but none of my servos have been moving, while the Arduino is constantly flashing as if it is receiving input from the Python code. I haven't been able to get the servos to move according to my Python code, but I have made simple code on the side to make sure the servos have good connections, and they work fine on their own. I'm not sure what is wrong...
Python Code
import numpy as np
import serial
import time
import sys
import cv2

arduino = serial.Serial('COM3', 9600)
time.sleep(2)
print("Connection to arduino...")
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
while 1:
    ret, img = cap.read()
    cv2.resizeWindow('img', 500, 500)
    cv2.line(img, (500, 250), (0, 250), (0, 255, 0), 1)
    cv2.line(img, (250, 0), (250, 500), (0, 255, 0), 1)
    cv2.circle(img, (250, 250), 5, (255, 255, 255), -1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 5)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        arr = {y: y + h, x: x + w}
        print(arr)
        print('X :' + str(x))
        print('Y :' + str(y))
        print('x+w :' + str(x + w))
        print('y+h :' + str(y + h))
        xx = int(x + (x + h)) / 2
        yy = int(y + (y + w)) / 2
        print(xx)
        print(yy)
        center = (xx, yy)
        print("Center of Rectangle is :", center)
        data = ("X {0: f} Y {1: f} Z".format(xx, yy))
        print("output = '" + data + "'")
        arduino.write(data.encode())
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
Arduino Code
#include <Servo.h>

Servo servoVer; //Vertical Servo
Servo servoHor; //Horizontal Servo

int x;
int y;
int prevX;
int prevY;

void setup()
{
  Serial.begin(9600);
  servoVer.attach(5); //Vertical Servo to Pin 5
  servoHor.attach(6); //Horizontal Servo to Pin 6
  servoVer.write(90);
  servoHor.write(90);
}

void Pos()
{
  if(prevX != x || prevY != y)
  {
    int servoX = map(x, 600, 0, 70, 179);
    int servoY = map(y, 450, 0, 179, 95);
    servoX = min(servoX, 179);
    servoX = max(servoX, 70);
    servoY = min(servoY, 179);
    servoY = max(servoY, 95);
    servoHor.write(servoX);
    servoVer.write(servoY);
  }
}

void loop()
{
  if(Serial.available() > 0)
  {
    if(Serial.read() == 'X')
    {
      x = Serial.parseInt();
      if(Serial.read() == 'Y')
      {
        y = Serial.parseInt();
        Pos();
      }
    }
    while(Serial.available() > 0)
    {
      Serial.read();
    }
  }
}
One huge problem is the way you are using Serial.read. That function consumes the character out of the buffer. You don't get to read the same one twice. So let's say you send a 'Y'. The first if statement reads the Y out of the serial buffer and compares to 'X', that's false so it moves on. Then it reads something else from serial, probably a -1 if nothing is left to read. But it doesn't see the 'Y' because that was read in the first if.
What you need to do is to read from serial into a char variable and then use that char variable in your if statements.
char c = Serial.read();
if(c == 'X')...
... if (c == 'Y')...
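On the Python side, a message format that the sketch's 'X' / parseInt / 'Y' / parseInt sequence can pick up directly might look like the following. This is an assumption of mine, not part of the answer above: plain integers with no spaces or decimal points, so parseInt stops exactly at the next marker character.

# assumed change to the Python sender: integers only, no spaces
data = "X{0:d}Y{1:d}".format(int(xx), int(yy))
arduino.write(data.encode())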

How can I get bytes data using TCPSocket?

I want to use a TCP socket to send NV21 images from a smartphone to a server and convert them to RGB there. I use Qt for Android to make the smartphone application and Python on the server. When I use a QByteArray to send the data, the picture is 1920*1080, so the size of the data is 3110400 bytes. However, I only receive 335620 bytes at the server.
I want to know how I can send the bytes correctly.
//QT client
void came::processFrame(const QVideoFrame& frame)
{
    QVideoFrame f(frame);
    QImage img;
    f.map(QAbstractVideoBuffer::ReadOnly);
    output.append((char*)f.bits(), f.mappedBytes());
    tcpSocket->write(output, output.size());
}
//Python server
def recvfromTcpSocket(sock, blockSize=4096, accept_addr=None):
    ''' a function to listen on a TCP socket
    and receive bytes till the buffer has no more. '''
    d = ''
    while True:
        print("Got tcp connection: " + str(tcpServerAddr))
        if accept_addr is None:
            break
        elif accept_addr == tcpServerAddr[0]:
            break
        else:
            sock.close()
            continue
    while True:
        block = sock.recv(blockSize)
        d += block
        if len(block) < blockSize and len(block) != 0:
            print("TCP recv done, all size: " + str(len(d)))
            break
    return d

if __name__ == '__main__':
    data = recvfromTcpSocket(tcpServerSocket)
    bin_y = data[0:rows * cols]
    num_y = np.fromstring(bin_y, np.uint8)
    img_y = np.reshape(num_y, (rows, cols))
    bin_u = data[rows * cols::2]
    num_u = np.fromstring(bin_u, np.uint8)
    img_u = np.reshape(num_u, (rows / 2, cols / 2))
    bin_v = data[rows * cols + 1::2]
    num_v = np.fromstring(bin_v, np.uint8)
    img_v = np.reshape(num_v, (rows / 2, cols / 2))
    enlarge_u = cv2.resize(img_u, dsize=(cols, rows), interpolation=cv2.INTER_CUBIC)
    enlarge_v = cv2.resize(img_v, dsize=(cols, rows), interpolation=cv2.INTER_CUBIC)
    dst = cv2.merge([img_y, enlarge_u, enlarge_v])
    bgr = cv2.cvtColor(dst, cv2.COLOR_YUV2BGR)
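The short read is most likely the classic TCP framing issue: recv() returns only what has arrived so far, and the len(block) < blockSize test can stop the loop before the rest of the frame shows up. A minimal sketch of a receive loop that waits for the full NV21 frame (1920 * 1080 * 3 // 2 = 3110400 bytes); the helper name recv_all is an illustration, not from the original code:

def recv_all(sock, n_bytes, blockSize=4096):
    # block until n_bytes have been received or the peer closes the connection
    buf = b''
    while len(buf) < n_bytes:
        block = sock.recv(min(blockSize, n_bytes - len(buf)))
        if not block:  # peer closed before the full frame arrived
            break
        buf += block
    return buf

# usage, assuming the client sends exactly one frame per connection:
# data = recv_all(tcpServerSocket, 1920 * 1080 * 3 // 2)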

Convert OpenCV IplImage* data to numpy array

I'm building a Python wrapper for a C++ DLL by means of ctypes. The aforementioned library makes extensive use of OpenCV 2.2 (using the old C API).
I want to convert the char* imageData field of OpenCV's IplImage structure to a numpy array. I've searched SO and the web for a few days, but no solution seems to solve my problem.
Here's the problem. I've tested my implementation with 2 images: one of size 600x599 (and it's all good) and the other one of 602x600 (and here's the problem). Both are color images (3-channel images). I've tested the implementation with several images of size 602x600 and always get the image distorted.
I'm guessing there might be something weird going on with the padding added to the image by OpenCV (although I think I took care of it in my implementation), but I can't quite put my finger on it.
The thing is that the second image shows up all distorted after the "processing" performed in the C++ DLL (none for the moment), and I can only think I'm doing something wrong converting the IplImage data (imageData) back to the numpy array.
Here goes the C++ source code:
char* do_something(IplImage *img, int* image_size)
{
    // returning the image data
    return get_data_from_iplimage
    (
        img, // doing zero processing for now
        image_size
    );
}

char* get_data_from_iplimage(IplImage* img, int* image_size)
{
    // getting the image total size
    *image_size = img->imageSize;
    // copying data
    char* image_data = new char[img->imageSize];
    memcpy(image_data, img->imageData, img->imageSize);
    // releasing the IplImage*
    cvReleaseImage(&img);
    // returning the image data
    return image_data;
}
Here goes the Python source code:
# Image type (IplImage)
IPL_DEPTH_SIGN = 0x80000000
IPL_DEPTH_1U = 1
IPL_DEPTH_8U = 8
IPL_DEPTH_16U = 16
IPL_DEPTH_32F = 32
IPL_DEPTH_64F = 64
IPL_DEPTH_8S = IPL_DEPTH_SIGN + IPL_DEPTH_8U
IPL_DEPTH_16S = IPL_DEPTH_SIGN + IPL_DEPTH_16U
IPL_DEPTH_32S = IPL_DEPTH_SIGN + 32

def depth2dtype(depth):
    if depth is IPL_DEPTH_8U:
        return np.dtype('uint8')
    elif depth is IPL_DEPTH_8S:
        return np.dtype('int8')
    elif depth is IPL_DEPTH_16U:
        return np.dtype('uint16')
    elif depth is IPL_DEPTH_16S:
        return np.dtype('int16')
    elif depth is IPL_DEPTH_32S:
        return np.dtype('int32')
    elif depth is IPL_DEPTH_32F:
        return np.dtype('float32')
    elif depth is IPL_DEPTH_64F:
        return np.dtype('float64')
    else:
        # This is probably a good default
        return np.dtype('uint8')

def get_iplimage_ptr(img):
    # None is considered as the NULL pointer
    if img is None:
        return None  # the same thing as 'return img'
    # getting image dimensions and data
    height, width, n_channels = get_ndarray_dimensions(img)
    img_data = img.tostring()
    # creating the image header
    cv_img = cv2.cv.CreateImageHeader((width, height), cv2.cv.IPL_DEPTH_8U, n_channels)
    width_step = img.dtype.itemsize * n_channels * width  # creating the famous 'width_step' parameter
    cv2.cv.SetData(cv_img, None, width_step)
    # setting the data (img is a numpy array)
    ipl = iplimage_t.from_address(id(cv_img))
    ipl_img_ptr = ipl.ipl_ptr.contents
    ipl_img_ptr.imageData = img_data
    # returning the OpenCV2.2 compatible image (IplImage*)
    return ipl_img_ptr

def get_ndarray_dimensions(img):
    # getting image shape information
    img_shape = img.shape
    img_shape_length = len(img_shape)
    # validating parameters
    if img_shape_length <= 1 or img_shape_length > 3:
        raise ArgumentError('Invalid image information. We support images with 1, 2 or 3 channels only.')
    # getting the amount of channels
    nc = 1 if img_shape_length == 2 else img_shape[2]
    # building the processed image
    h, w = img_shape[0], img_shape[1]
    # returning the height, width and nChannels
    return h, w, nc

def build_ndarray_from_data(str_data, img_shape):
    # getting image dimensions
    height, width, n_channels = img_shape
    # getting the ndarray datatype
    dtype = depth2dtype(IPL_DEPTH_8U)
    # building a numpy ndarray from the string data
    ndarray = np.fromstring(str_data, dtype)
    # reshaping to 'height' rows
    # width_step = len(str_data) / height
    ndarray = ndarray.reshape(height, -1)
    # removing the padding added by opencv to each row
    cols = dtype.itemsize * width * n_channels
    ndarray = ndarray[:, :cols]
    # reshaping to the final ndarray dimensions
    ndarray = ndarray.reshape((height, width, n_channels))
    # returning the numpy array that represents the image
    return ndarray

# python wrapper function to the c++ function
def do_something(img):
    # getting the IplImage*
    iplimage_ptr = get_iplimage_ptr(img)
    # calling the c++ function
    image_size = c_int(0)
    byte_data = __c_do_something(iplimage_ptr, byref(image_size))
    str_data = string_at(byte_data, image_size.value)
    # getting the image dimensions
    img_shape = get_ndarray_dimensions(img)
    # building the processed image
    proc_img = build_ndarray_from_data(str_data, img_shape)
    # returning the processed image
    return proc_img

# does something ('pointer' to the c function)
__c_do_something = c_func(
    'do_something', _cdll, c_byte_p,
    ('img', POINTER(IplImage), 1),  # IplImage *img
    ('image_size', c_int_p, 1),     # int* image_size
)
I apologize for the length of the source code (although there are a few definitions missing), but I guess that "explicit is better than implicit", hehe.
Any help would be appreciated.
PS: If it helps, I'm using Python 2.7, NumPy 1.7.1, OpenCV 2.2 (precompiled), Visual Studio 2013 (Visual C++) and Windows 8.1.
I might be wrong, but... as I see it, you can just convert the IplImage to a Mat and then convert that to a Python numpy array. Of course you can do this the other way too - numpy array to Mat and Mat to IplImage. Here is code which works great for OpenCV 2.x (tested on OpenCV 2.4.10, but it should work for 2.2 as well). If it doesn't work for your version, it should at least be a good hint and help you write Boost.Python converters for your version.
If - for some reason - you can't convert the IplImage to a Mat, let us know why so we can try to find another solution.
After a few days stuck on this problem, I think I finally reached a solution. Instead of passing the imageData (char*), I decided to replicate the OpenCV IplImage structure in Python with ctypes, and then build the numpy array from the received IplImage pointer.
By the way, I still don't know what was happening before, but I guess there was something going wrong in the conversion of the char* imageData to a string in Python (0 values translated as null characters, etc.).
The C++ snippet is a bit simpler now, as I don't need to "extract" the imageData from the image. Here it goes:
IplImage* do_something(IplImage *img)
{
    // doing nothing
    return img;
}
On the Python side, the code is somewhat similar to the old one. There are, however, some key aspects:
First, a 'cv.iplimage' is built.
The 'cv.iplimage' is then converted to a 'cv.cvmat'.
Finally, the 'cv.cvmat' is converted to a numpy array.
Here goes the code:
# Image type (IplImage)
IPL_DEPTH_SIGN = 0x80000000
IPL_DEPTH_1U = 1
IPL_DEPTH_8U = 8
IPL_DEPTH_16U = 16
IPL_DEPTH_32F = 32
IPL_DEPTH_64F = 64
IPL_DEPTH_8S = IPL_DEPTH_SIGN + IPL_DEPTH_8U
IPL_DEPTH_16S = IPL_DEPTH_SIGN + IPL_DEPTH_16U
IPL_DEPTH_32S = IPL_DEPTH_SIGN + 32

# subclassing the ctypes.Structure class to add new features
class _Structure(Structure):
    def __repr__(self):
        """
        Print the fields
        """
        res = []
        for field in self._fields_:
            res.append('%s=%s' % (field[0], repr(getattr(self, field[0]))))
        return self.__class__.__name__ + '(' + ','.join(res) + ')'

class IplTileInfo(_Structure):
    _fields_ = []

class IplROI(_Structure):
    _fields_ = \
        [
            # 0 - no COI (all channels are selected)
            # 1 - 0th channel is selected ...
            ('coi', c_int),
            ('xOffset', c_int),
            ('yOffset', c_int),
            ('width', c_int),
            ('height', c_int),
        ]

# ipl image header
class IplImage(_Structure):
    def __repr__(self):
        """
        Print the fields
        """
        res = []
        for field in self._fields_:
            if field[0] in ['imageData', 'imageDataOrigin']:
                continue
            res.append('%s=%s' % (field[0], repr(getattr(self, field[0]))))
        return self.__class__.__name__ + '(' + ','.join(res) + ')'

IplImage._fields_ = [
    ("nSize", c_int),
    ("ID", c_int),
    ("nChannels", c_int),
    ("alphaChannel", c_int),
    ("depth", c_int),
    ("colorModel", c_char * 4),
    ("channelSeq", c_char * 4),
    ("dataOrder", c_int),
    ("origin", c_int),
    ("align", c_int),
    ("width", c_int),
    ("height", c_int),
    ("roi", POINTER(IplROI)),
    ("maskROI", POINTER(IplImage)),
    ("imageID", c_void_p),
    ("tileInfo", POINTER(IplTileInfo)),
    ("imageSize", c_int),
    ("imageData", c_byte_p),
    ("widthStep", c_int),
    ("BorderMode", c_int * 4),
    ("BorderConst", c_int * 4),
    ("imageDataOrigin", c_char_p)]

class iplimage_t(_Structure):
    _fields_ = \
        [
            ('ob_refcnt', c_ssize_t),
            ('ob_type', py_object),
            ('ipl_ptr', POINTER(IplImage)),
            ('data', py_object),
            ('offset', c_size_t)
        ]

# gets the dimensions of a numpy ndarray
def get_ndarray_dimensions(img):
    # getting image shape information
    img_shape = img.shape
    img_shape_length = len(img_shape)
    # validating parameters
    if img_shape_length <= 1 or img_shape_length > 3:
        raise ArgumentError('Invalid image information. We support images with 1, 2 or 3 channels only.')
    # getting the amount of channels
    nc = 1 if img_shape_length == 2 else img_shape[2]
    # building the processed image
    h, w = img_shape[0], img_shape[1]
    # returning the height, width and nChannels
    return h, w, nc

def build_ndarray_from_data(iplimage_ptr, img_shape):
    # getting image dimensions
    height, width, n_channels = img_shape
    # getting the IplImage*
    iplimage = iplimage_ptr.contents
    # creating the image header
    cv_img = cv2.cv.CreateImageHeader((width, height), IPL_DEPTH_8U, n_channels)
    # getting the char* from byte data
    str_data = string_at(iplimage.imageData, iplimage.imageSize)
    # setting the image data
    cv2.cv.SetData(cv_img, str_data, iplimage.widthStep)
    # building a CvMat image
    cv_mat = cv_img[:, :]
    # building the ndarray from the CvMat image
    ndarray = np.asarray(cv_mat)
    # returning the built ndarray
    return ndarray

# python wrapper function to the c++ function
def do_something(img):
    # getting the IplImage*
    iplimage_ptr = get_iplimage_ptr(img)
    # calling the c++ function
    ipl_ptr = __c_do_something(iplimage_ptr)
    # getting the image dimensions
    img_shape = get_ndarray_dimensions(img)
    # building the processed image
    proc_img = build_ndarray_from_data(ipl_ptr, img_shape)
    # returning the processed image
    return proc_img

# does something ('pointer' to the c function)
__c_do_something = c_func(
    'do_something', _cdll, POINTER(IplImage),
    ('img', POINTER(IplImage), 1),  # IplImage *img
)
Hope it helps ;).
PS: I apologize for the length of the code, but I tried to provide the closest thing to a working example. Loading the compiled C++ .dll with ctypes is up to you (:.
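For completeness, a minimal usage sketch of the wrapper above, assuming the missing ctypes definitions and the DLL loading are already in place (the file name is only an example):

import cv2
import numpy as np

# any 8-bit, 3-channel test image will do
img = cv2.imread('test.jpg')
# round-trip the numpy array through the C++ DLL via the IplImage wrapper
out = do_something(img)
print(out.shape, out.dtype)        # expected: same shape as img, uint8
print(np.array_equal(img, out))    # True while the DLL does nothing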
You don't need the Python IplImage; just do this:
C file:
void *return_frame;

extern "C" void* get_rgb_frame(){
    return return_frame;
}

#define FRAME_BUFSIZE (1920 * 1080 * 3 + 1)

return_frame = malloc(FRAME_BUFSIZE);
memset(return_frame, 0x00, FRAME_BUFSIZE);
IplImage* pImg = cvLoadImage("test.jpg", -1);
memcpy(return_frame, pImg->imageData, 1920 * 1080 * 3);
cvReleaseImage(&pImg);
Python file:
dll.get_rgb_frame.restype = c_void_p
yv12_img = dll.get_rgb_frame()
imagedata = string_at(yv12_img, 1920 * 1080 * 3)
cv_img = cv2.cv.CreateImageHeader((1920, 1080), cv2.cv.IPL_DEPTH_8U, 3)
cv2.cv.SetData(cv_img, imagedata, 3 * 1920)
cv_mat = cv_img[:]
array = np.asarray(cv_mat)
cv2.imshow('jinfeng', array)
and you can show the image in Python.
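On OpenCV builds where the legacy cv2.cv module is no longer available (3.x and later), the same buffer can be wrapped directly with NumPy instead of CreateImageHeader/SetData; a rough sketch under that assumption:

import numpy as np
import cv2
from ctypes import string_at

imagedata = string_at(yv12_img, 1920 * 1080 * 3)
# interpret the raw bytes as a height x width x channels uint8 array
array = np.frombuffer(imagedata, dtype=np.uint8).reshape(1080, 1920, 3)
cv2.imshow('jinfeng', array)
cv2.waitKey(0)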

data type errors for input images of cv2.calcOpticalFlowPyrLK

I'm running OpenCV 2.4.1 using the Python bindings and am having difficulty calculating the optical flow.
Specifically this section of code:
#calculate the opticalflow
if prev_saturation_thresh_img == None:
    prev_saturation_thresh_img = saturation_img
if i >= 0:
    prev_img = prev_saturation_thresh_img
    next_img = saturation_thresh_img
    p1, st, err = cv2.calcOpticalFlowPyrLK(prev_img, next_img, tracks_np, **lk_params)
Returns the error:
<unknown> is not a numpy array
So then I try to convert the images to numpy arrays:
prev_img=prev_saturation_thresh_img
next_img=saturation_thresh_img
Now I have a new error:
<unknown> data type = 17 is not supported
In a last-ditch effort I convert the images to cvmat (from iplimage) before converting them to numpy arrays, just to see what happens:
error: ..\..\..\OpenCV-2.4.1\modules\video\src\lkpyramid.cpp:607: error: (-215) nextPtsMat.checkVector(2, CV_32F, true) == npoints
So now I'm stuck. Below is the code in its entirety for reference.
import cv
import cv2
import numpy as np

class Target:
    def __init__(self):
        self.capture = cv.CaptureFromFile("raw_gait_cropped.avi")

    def run(self):
        #initiate font
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)
        #instantiate images
        img_size = cv.GetSize(cv.QueryFrame(self.capture))
        hsv_img = cv.CreateImage(img_size, 8, 3)
        saturation_img = cv.CreateImage(img_size, 8, 1)
        saturation_thresh_img = cv.CreateImage(img_size, 8, 1)
        prev_saturation_thresh_img = None
        #create params for GoodFeaturesToTrack and calcOpticalFlowPyrLK
        gftt_params = dict(cornerCount=11,
                           qualityLevel=0.2,
                           minDistance=5,
                           mask=None,
                           useHarris=True
                           )
        lk_params = dict(winSize=(15, 15),
                         maxLevel=2,
                         criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
                         flags=cv2.OPTFLOW_USE_INITIAL_FLOW,
                         minEigThreshold=1
                         )
        tracks = []
        writer = cv.CreateVideoWriter("angle_tracking.avi", cv.CV_FOURCC('M', 'J', 'P', 'G'), 30, cv.GetSize(hsv_img), 1)
        i = 0
        while True:
            #grab a frame from the video capture
            img = cv.QueryFrame(self.capture)
            #break the loop when the video is over
            if img == None:
                break
            #convert the image to HSV
            cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
            #Get Saturation channel
            cv.MixChannels([hsv_img], [saturation_img], [(1, 0)])
            #Apply threshold to saturation channel
            cv.InRangeS(saturation_img, 145, 255, saturation_thresh_img)
            #locate initial features to track
            if i == 0:
                eig_image = temp_image = cv.CreateMat(img.height, img.width, cv.CV_32FC1)
                for (x, y) in cv.GoodFeaturesToTrack(saturation_thresh_img, eig_image, temp_image, **gftt_params):
                    tracks.append([(x, y)])
                    cv.Circle(saturation_thresh_img, (int(x), int(y)), 5, (255, 255, 255), -1, cv.CV_AA, 0)
                tracks_np = np.float32(tracks).reshape(-1, 2)
                print tracks
            #calculate the opticalflow
            if prev_saturation_thresh_img == None:
                prev_saturation_thresh_img = saturation_img
            if i >= 0:
                prev_img = prev_saturation_thresh_img
                next_img = saturation_thresh_img
                p1, st, err = cv2.calcOpticalFlowPyrLK(prev_img, next_img, tracks_np, **lk_params)
                prev_saturation_thresh_img = saturation_img
            i = i + 1
            print i
            #display frames to users
            cv.ShowImage("Raw Video", img)
            cv.ShowImage("Saturation Channel", saturation_img)
            cv.ShowImage("Saturation Thresholded", saturation_thresh_img)
            # Listen for ESC or ENTER key
            c = cv.WaitKey(7) % 0x100
            if c == 27 or c == 10:
                break
        #close all windows once video is done
        cv.DestroyAllWindows()

if __name__ == "__main__":
    t = Target()
    t.run()
OpenCV can be very picky about the data formats it accepts. The following code extract works for me:
prev = cv.LoadImage('images/'+file_list[0])
prev = np.asarray(prev[:,:])
prev_gs = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
current = cv.LoadImage('images/'+file)
current = np.asarray(current[:,:])
current_gs = cv2.cvtColor(current, cv2.COLOR_BGR2GRAY)
features, status, track_error = cv2.calcOpticalFlowPyrLK(prev_gs, current_gs, good_features, None,
**lk_params)
Note the [:,:] when converting from images to numpy arrays, I have found that they are required.
I hope that this may solve your problem.
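One more detail that may matter for the last error in the question (this is my reading of it, not part of the answer above): calcOpticalFlowPyrLK also wants the point array itself as float32, and the OPTFLOW_USE_INITIAL_FLOW flag requires an initial nextPts estimate to be supplied. A rough sketch along those lines, reusing the names from the code above:

import numpy as np
import cv2

# previous points as float32; shape (N, 1, 2) is the layout that works across versions
p0 = np.float32(tracks).reshape(-1, 1, 2)

lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# prev_gs and current_gs are 8-bit single-channel numpy arrays as in the answer above
p1, st, err = cv2.calcOpticalFlowPyrLK(prev_gs, current_gs, p0, None, **lk_params)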

Background extraction in Python using open CV

I am trying to extract a background image from a video so I can detect moving objects in it.
I have found functions like cv2.BackgroundSubtractorMOG(); however, I just can't get it to work.
Does someone have some experience using this?
I have created the object mog = cv2.BackgroundSubtractorMOG(300, -1, -1, -1).
Then I try mog.apply(Nmat, Nforemat, -1), but that doesn't seem to work; I get the following
error:
......\OpenCV-2.4.0\modules\video\src\bgfg_gaussmix.cpp:117: error: (-215) CV_MAT_DEPTH(frameType) == CV_8U
Nmat and Nforemat are numpy arrays because I was also getting an error if they weren't.
Here is the work in progress...
import cv
import cv2
import numpy as np

if __name__ == '__main__':
    cv.NamedWindow("test1", cv.CV_WINDOW_AUTOSIZE)
    cv.NamedWindow("test2", cv.CV_WINDOW_AUTOSIZE)
    capture = cv.CreateFileCapture('test.avi')
    frame = cv.QueryFrame(capture)
    img = cv.CreateImage(cv.GetSize(frame), 8, 1)
    thresh = cv.CreateImage(cv.GetSize(frame), 8, 1)
    foreground = cv.CreateImage(cv.GetSize(frame), 8, 1)
    foremat = cv.GetMat(foreground)
    Nforemat = np.array(foremat, dtype=np.float32)
    thresh = cv.CreateImage(cv.GetSize(img), 8, 1)
    mog = cv2.BackgroundSubtractorMOG()
    loop = True
    nframes = 0
    while(loop):
        frame = cv.QueryFrame(capture)
        mat = cv.GetMat(frame)
        Nmat = np.array(mat, dtype=np.float32)
        cv.CvtColor(frame, img, cv.CV_BGR2GRAY)
        if (frame == None):
            break
        mog.apply(Nmat, Nforemat, -1)
        cv.Threshold(img, thresh, 100, 255, cv.CV_THRESH_BINARY)
        cv.ShowImage("test1", thresh)
        cv.ShowImage("test2", frame)
        char = cv.WaitKey(50)
        if (char != -1):
            if (char == 27):
                break
    cv.DestroyWindow("test1")
    cv.DestroyWindow("test2")
Change
Nmat = np.array(mat, dtype=np.float32)
to
Nmat = np.array(mat, dtype=np.uint8)
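The failed assertion says the input frame must be 8-bit (CV_8U), which is what the uint8 change above addresses. A minimal sketch of the subtractor running on uint8 frames, using the cv2 capture API purely for illustration:

import cv2

cap = cv2.VideoCapture('test.avi')
mog = cv2.BackgroundSubtractorMOG()      # on OpenCV 3+ use cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()              # frame is already a uint8 numpy array
    if not ret:
        break
    fgmask = mog.apply(frame)            # 8-bit single-channel foreground mask
    cv2.imshow('foreground', fgmask)
    if cv2.waitKey(50) & 0xff == 27:     # ESC to quit
        break

cap.release()
cv2.destroyAllWindows()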
Why are you using these lines:
thresh = cv.CreateImage(cv.GetSize(img),8,1)
and
cv.Threshold(img,thresh,100,255,cv.CV_THRESH_BINARY)
?
