How to read data asynchronously in C# from a Python script - python

I am trying to read a Python script's output continuously (in Python I am tracking a colored ball and printing its x, y and radius) from WPF C#.
First, I tried socket communication (ZeroMQ):
using (var requester = new ZSocket(ZSocketType.REQ))
{
// Connect
requester.Connect("tcp://127.0.0.1:5555");
for (; ; )
{
string requestText = "Hello";
Console.Write("Sending {0}...", requestText);
// Send
requester.Send(new ZFrame(requestText));
// Receive
using (ZFrame reply = requester.ReceiveFrame())
{
Console.WriteLine(" Received: {0} {1}!", requestText, reply.ReadString());
}
}
}
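For completeness, a rough sketch of what the matching REP side could look like in Python (this assumes pyzmq; my own zmq lines are commented out in the tracking script further below):
import zmq

context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind("tcp://*:5555")

while True:
    message = socket.recv()              # blocks until the C# REQ side sends "Hello"
    # reply with the latest tracking result; the values here are placeholders
    socket.send_string("x=0.0,y=0.0,radius=0.0")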
Second, I tried calling Python from a C# Process:
private void Window_Loaded(object sender, RoutedEventArgs e)
{
string python = @"C:\Desktop\test\venv\Scripts\python.exe";
// python app to call
string myPythonApp = @"C:\Users\Desktop\test\main.py";
// Create new process start info
ProcessStartInfo myProcessStartInfo = new ProcessStartInfo(python);
// make sure we can read the output from stdout
myProcessStartInfo.UseShellExecute = false;
myProcessStartInfo.RedirectStandardOutput = true;
myProcessStartInfo.Arguments = myPythonApp;
Process myProcess = new Process();
// assign start information to the process
myProcess.StartInfo = myProcessStartInfo;
// start the process
myProcess.OutputDataReceived += MyProcess_OutputDataReceived;
myProcess.Start();
myProcess.BeginOutputReadLine();
// Read the standard output of the app we called.
// in order to avoid deadlock we will read output first
// and then wait for process terminate:
//StreamReader myStreamReader = myProcess.StandardOutput;
//string myString = myStreamReader.ReadLine();
//await myStreamReader.ReadAsync(result, 0, (int)myStreamReader.BaseStream.Length);
// wait exit signal from the app we called and then close it.
//myProcess.WaitForExit();
//myProcess.Close();
// write the output we got from python app
//Console.WriteLine("Value received from script: " + myString);
}
private void MyProcess_OutputDataReceived(object sender, DataReceivedEventArgs e)
{
Console.WriteLine("++++");
}
Third, Python writes the data to a txt file and C# reads from that file:
Console.WriteLine("hi");
for (; ; )
{
if (File.Exists(textFile))
{
using (StreamReader file = new StreamReader(textFile))
{
int counter = 0;
string ln;
while ((ln = file.ReadLine()) != null)
{
Console.WriteLine(ln);
counter++;
}
file.Close();
}
}
}
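On the Python side, one way to make sure the C# reader never sees a half-written or locked file is to write to a temporary file and rename it into place; a minimal sketch (the publish_reading helper is hypothetical, not part of my script):
import os
import tempfile

def publish_reading(path, x, y, radius):
    # write to a temp file in the same directory, then atomically swap it in
    directory = os.path.dirname(os.path.abspath(path)) or "."
    fd, tmp = tempfile.mkstemp(dir=directory)
    with os.fdopen(fd, "w") as f:
        f.write("{},{},{}\n".format(x, y, radius))
    os.replace(tmp, path)  # atomic replace on the same volume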
In all of these cases Python is holding the thread and not allowing C# to access the data. Right now I am working on the second approach, i.e. getting the data from the Process, but MyProcess_OutputDataReceived is not triggering.
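One possibility I have not ruled out is that Python buffers stdout when it is redirected to a pipe, so nothing reaches OutputDataReceived until the buffer fills. A minimal sketch of forcing unbuffered lines (this is an assumption, not something I have confirmed):
import sys
import time

# hypothetical stand-in for the tracking loop: one "x,y,radius" line per frame
for i in range(100):
    x, y, radius = 10.0 + i, 20.0, 15.0            # placeholder values
    print("{},{},{}".format(x, y, radius), flush=True)   # or run python with -u
    time.sleep(0.03)
The full Python script I am running is below.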
import sys
from collections import deque
from PIL import Image, ImageOps, ImageDraw
import numpy as np
import argparse
import imutils
import cv2
import time
import pandas as pd
import matplotlib.pyplot as plt
#import zmq
#context = zmq.Context()
#socket = context.socket(zmq.REP)
#socket.bind("tcp://*:5555")
#object tracking def start
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
help="max buffer size")
args = vars(ap.parse_args())
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])
if not args.get("video", False):
camera = cv2.VideoCapture(0)
else:
camera = cv2.VideoCapture(args["video"])
Data_Features = ['x', 'y', 'time']
Data_Points = pd.DataFrame(data=None, columns=Data_Features, dtype=float)
start = time.time()
#object tracking def end
#file1 = open("myfile.txt","w")
#L = ["This is Delhi \n","This is Paris \n","This is London \n"]
#file1.write("Hello \n")
#file1.writelines(L)
#file1.close()
while True:
# Wait for next request from client
#message = socket.recv()
#print("Received request: %s" % message)
# Do some 'work'
#time.sleep(1)
# Send reply back to client
#socket.send("World")
#object track vids start
(grabbed, frame) = camera.read()
current_time = time.time() - start
if args.get("video") and not grabbed:
break
frame = imutils.resize(frame, width=1600)
# frame2 = imutils.resize
#blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, greenLower, greenUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size
if (radius < 300) & (radius > 10):
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
sys.stdout.write(str(x) + '\n')
#print(str(x))
#file1 = open("myfile.txt", "w")
#file1.write(str(x))
#file1.close()
#time.sleep(1)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
# Save The Data Points
Data_Points.loc[Data_Points.size / 3] = [x, y, current_time]
pts.appendleft(center)
for i in range(1, len(pts)):
# if either of the tracked points are None, ignore
# them
if pts[i - 1] is None or pts[i] is None:
continue
# otherwise, compute the thickness of the line and
# draw the connecting lines
thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
#object tracking vids end
h = 0.2
X0 = -3
Y0 = 20
time0 = 0
theta0 = 0.3
# Applying the correction terms to obtain actual experimental data
Data_Points['x'] = Data_Points['x'] - X0
Data_Points['y'] = Data_Points['y'] - Y0
Data_Points['time'] = Data_Points['time'] - time0
# Calculation of theta value
Data_Points['theta'] = 2 * np.arctan(
Data_Points['y'] * 0.0000762 / h) # the factor corresponds to pixel length in real life
Data_Points['theta'] = Data_Points['theta'] - theta0
# Creating the 'Theta' vs 'Time' plot
plt.plot(Data_Points['theta'], Data_Points['time'])
plt.xlabel('Theta')
plt.ylabel('Time')
# Export the data points as a csv file and plot
Data_Points.to_csv('Data_Set.csv', sep=",")
plt.savefig('Time_vs_Theta_Graph.svg', transparent=True)
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
Please help me.
Thank you in advance.
Ranjith

Related

How to call a function once in a while loop

I have robot code that does the following:
camera starts processing and taking images
Mounting Holes (hough transform) function detection is activated
The holes are drawn on the image
approachcircle function moves the robot towards one of the set coordinates
I have two issues:
The mounting-hole detection keeps getting called even after the coordinates have been detected once.
The robot in the approachcircle function can't move to one coordinate and then on to the other. It keeps going back and forth, because x and y aren't set so that the first set of coordinates is finished before the next one; i.e., between two circles it does not reach either center as expected. It never reaches the center of a detected circle if there is more than one.
I want the code to call the MountingHoles function once and have the robot move to each recorded coordinate; after the initial set of coordinates is done, I will have the robot move to another area and start the process again. I'm assuming the problem is that the functions sit inside the camera-processing loop, which runs indefinitely.
The code is below:
##Def:
def approachcircle (r,t,z):
move = robot.Pose()*transl(r,t,z)
robot.MoveL(move)
def approacharea (z):
move = robot.Pose()*transl(0,0,z)
robot.MoveL(move)
def MountingHoles(img,thresh,r):
minR = r
CannyHighT = 50
min_points = 15 #param2
img_1= cv.cvtColor(img,cv.COLOR_BGR2GRAY)
#img3 = cv2.inRange(img_1, thresh, 255)
circles = cv.HoughCircles(img_1,cv.HOUGH_GRADIENT, 1, 2*minR, param1=CannyHighT,
param2=min_points, minRadius=minR, maxRadius=220)
return circles
#Installation
from robolink import * # RoboDK API
from robodk import * # Robot toolbox
RDK = Robolink()
pose = eye()
ITEM_TYPE_ROBOT
RDK = robolink.Robolink()
robot = RDK.Item('TM12X')
import_install('cv2', 'opencv-python')
import cv2 as cv
import numpy as np
import numpy
#----------------------------------
# Settings
PROCESS_COUNT = -1 # How many frames to process before exiting. -1 means indefinitely.
CAM_NAME = "Camera"
DISPLAY_SETTINGS = True
WDW_NAME_PARAMS = 'RoboDK - Blob detection parameters'
DISPLAY_RESULT = True
WDW_NAME_RESULTS = 'RoboDK - Blob detections1'
# Calculate absolute XYZ position clicked from the camera in absolute coordinates:
cam_item = RDK.Item(CAM_NAME, ITEM_TYPE_CAMERA)
if not cam_item.Valid():
raise Exception("Camera not found! %s" % CAM_NAME)
cam_item.setParam('Open', 1) # Force the camera view to open
#----------------------------------------------
# Create an resizable result window
if DISPLAY_RESULT:
cv.namedWindow(WDW_NAME_RESULTS) #, cv.WINDOW_NORMAL)
#----------------------------------------------
# capture = cv.VideoCapture(0)
# retval, image = capture.read()
#----------------------------------------------
# Process camera frames
count = 0
while count < PROCESS_COUNT or PROCESS_COUNT < 0:
print("=============================================")
print("Processing image %i" % count)
count += 1
#----------------------------------------------
# Get the image from RoboDK
bytes_img = RDK.Cam2D_Snapshot("", cam_item)
if bytes_img == b'':
raise
# Image from RoboDK are BGR, uchar, (h,w,3)
nparr = np.frombuffer(bytes_img, np.uint8)
img = cv.imdecode(nparr, cv.IMREAD_UNCHANGED)
if img is None or img.shape == ():
raise
#----------------------------------------------
# Detect blobs
keypoints = MountingHoles(img,250,50)
i = 0
#----------------------------------------------
# Display the detection to the user (reference image and camera image side by side, with detection results)
if DISPLAY_RESULT:
# Draw detected blobs and their id
i = 0
for keypoint in keypoints[0,:]:
cv.putText(img, str(i), (int(keypoint[0]), int(keypoint[1])), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 1, cv.LINE_AA)
cv.circle(img, (int(keypoint[0]), int(keypoint[1])), int(keypoint[2]), (0, 0, 255), 15)
#
i += 1
# Resize the image, so that it fits your screen
img = cv.resize(img, (int(img.shape[1] * .75), int(img.shape[0] * .75)))#
cv.imshow(WDW_NAME_RESULTS, img)
key = cv.waitKey(500)
if key == 27:
break # User pressed ESC, exit
if cv.getWindowProperty(WDW_NAME_RESULTS, cv.WND_PROP_VISIBLE) < 1:
break # User killed the window, exit
#--------------------------------------------------------------------------------------------
# Movement functions
r=0
t=0
i=0
#approacharea(200)
for keypoint in keypoints[0,:]:
#print("id:%i coord=(%0.0f, %0.0f)" % (i, X, Y))
X= int(keypoint[0])-320
Y=int(keypoint[1])-240
r=int(keypoint[2])
print("id:%i coord=(%0.0f, %0.0f)" % (i, X, Y))
if X!= 0 or Y!=0 :
r=X*0.1
t=Y*0.1
approachcircle(r,t,0)
i+=1
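One pattern I am considering, sketched under the assumption that the detection only needs to run on the first frame of each area and the stored circles are then visited one by one; detect_once and visit are placeholders for MountingHoles and approachcircle, and get_frame stands in for the RoboDK snapshot code above:
def process_area(get_frame, detect_once, visit):
    # Run the expensive detection once, on the first usable frame, then visit
    # each stored coordinate to completion before moving on to the next one.
    detections = None
    while detections is None:
        frame = get_frame()
        detections = detect_once(frame)   # e.g. MountingHoles(frame, 250, 50)[0, :]
    for d in detections:
        visit(d)                          # e.g. approachcircle(...) until this hole is reached
    # once this returns, the robot can be moved to the next area and this called again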

How to project PyBullet simulation coordinates to rendered Frame pixel coordinates using OpenCV?

How can I convert an object's position in PyBullet to pixel coordinates and draw a line onto the frame using PyBullet and OpenCV?
We would like to do this because PyBullet's native addUserDebugLine() function is not available in DIRECT mode.
import pybullet as p
import numpy as np
import time
import pybullet_data
import cv2
VIDEO_RESOLUTION = (1280, 720)
MY_COLORS = [(255,0,0), (0,255,0), (0,0,255)]
def capture_frame(base_pos=[0,0,0], _cam_dist=3, _cam_yaw=45, _cam_pitch=-45):
_render_width, _render_height = VIDEO_RESOLUTION
view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=_cam_dist,
yaw=_cam_yaw,
pitch=_cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(
fov=90, aspect=float(_render_width) / _render_height,
nearVal=0.01, farVal=100.0)
(_, _, px, _, _) = p.getCameraImage(
width=_render_width, height=_render_height, viewMatrix=view_matrix,
projectionMatrix=proj_matrix, renderer=p.ER_TINY_RENDERER) # ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (_render_height, _render_width, 4))
rgb_array = rgb_array[:, :, :3]
return rgb_array, view_matrix, proj_matrix
def render():
frame, vmat, pmat = capture_frame()
p1, cubeOrn = p.getBasePositionAndOrientation(1)
p2, cubeOrn = p.getBasePositionAndOrientation(2)
frame, view_matrix, proj_matrix = capture_frame()
frame = cv2.resize(frame, VIDEO_RESOLUTION)
points = {}
# reshape matrices
my_order = 'C'
pmat = np.array(proj_matrix).reshape((4,4), order=my_order)
vmat = np.array(view_matrix).reshape((4,4), order=my_order)
fmat = vmat.T @ pmat.T
# compute origin from origin point in simulation
origin = np.array([0,0,0,1])
frame_origin = (fmat @ origin)[:3]*np.array([1280, 640, 0]) + np.array([640, 360, 0])
# define unit vectors
unit_vectors = [ np.array([1,0,0,1]),
np.array([0,1,0,1]),
np.array([0,0,1,1]) ]
for col_id, unit_vector in enumerate(unit_vectors):
cur_point = (fmat @ unit_vector)[:3]*np.array([1280, 640, 0]) + np.array([640, 360, 0])
cv2.line(frame, (640,360), (int(cur_point[0]),int(cur_point[1])), color=MY_COLORS[col_id], thickness=2)
cv2.imwrite("my_rendering.jpg", frame)
print(p1,p2)
if __name__ == '__main__':
physicsClient = p.connect(p.DIRECT)#or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
p.setGravity(0,0,-10)
planeId = p.loadURDF("plane.urdf")
startPos = [1,0,0.2]
startOrientation = p.getQuaternionFromEuler([0,0,0])
boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
startPos = [0,2,0.2]
boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
#set the center of mass frame (loadURDF sets base link frame) startPos/Ornp.resetBasePositionAndOrientation(boxId, startPos, startOrientation)
for i in range (2400):
if i == 2399:
render()
p.stepSimulation()
p.disconnect()
The expected output would be the following frame but with the origin-coordinate frame drawn correctly. E.g. X, Y, and Z axis are colored Red, Blue, and Green respectively.
Since the two R2D2 robots are positioned at [1,0,0] and [0,1,0] respectively, we can see that the coordinate frame is off. (See image below)
We tried the following:
transposing the matrices
not transposing the matrices
changing the order of how we compute fmat e.g. pmat @ vmat instead of vmat @ pmat etc.
Any help is appreciated.
After a lot of fiddling, I came to a solution.
Playing with it for a while, I came to a point where it looked almost OK except for a rotation of the axes given by the yaw angle. So, I did a second call to computeViewMatrixFromYawPitchRoll but with the opposite yaw in order to compute the transformation for the axes.
Unfortunately, I'm not sure about why this works... But it works!
Note: base_pos, _cam_dist, _cam_yaw and _cam_pitch have been moved into render(). Note also: the up direction has been reversed too (don't ask why... :-) ). A pretty messy explanation, I must admit...
import pybullet as p
import numpy as np
import time
import pybullet_data
import cv2
import os
VIDEO_RESOLUTION = (1280, 720)
MY_COLORS = [(255,0,0), (0,255,0), (0,0,255)]
K=np.array([[1280,0,0],[0,720,0],[0,0,1]])
def capture_frame(base_pos, _cam_dist, _cam_yaw, _cam_pitch):
_render_width, _render_height = VIDEO_RESOLUTION
view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=_cam_dist,
yaw=_cam_yaw,
pitch=_cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(
fov=90, aspect=float(_render_width) / _render_height,
nearVal=0.01, farVal=100.0)
(_, _, px, _, _) = p.getCameraImage(
width=_render_width, height=_render_height, viewMatrix=view_matrix,
projectionMatrix=proj_matrix, renderer=p.ER_TINY_RENDERER) # ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (_render_height, _render_width, 4))
rgb_array = rgb_array[:, :, :3]
return rgb_array, view_matrix, proj_matrix
def render():
p1, cubeOrn = p.getBasePositionAndOrientation(1)
p2, cubeOrn = p.getBasePositionAndOrientation(2)
base_pos=[0,0,0]
_cam_dist=3
_cam_yaw=45
_cam_pitch=-30
frame, view_matrix, proj_matrix = capture_frame(base_pos, _cam_dist, _cam_yaw, _cam_pitch)
frame = cv2.resize(frame, VIDEO_RESOLUTION)
points = {}
# inverse transform
view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=_cam_dist,
yaw=-_cam_yaw,
pitch=_cam_pitch,
roll=0,
upAxisIndex=2)
my_order = 'C'
pmat = np.array(proj_matrix).reshape((4,4), order=my_order)
vmat = np.array(view_matrix).reshape((4,4), order=my_order)
fmat = pmat @ vmat.T
# compute origin from origin point in simulation
origin = np.array([0,0,0,1])
frame_origin = (fmat @ origin)[:3]*np.array([1280, 720, 0]) + np.array([640, 360, 0])
# define unit vectors
unit_vectors = [ np.array([1,0,0,1]),
np.array([0,1,0,1]),
np.array([0,0,-1,1]) ]
for col_id, unit_vector in enumerate(unit_vectors):
cur_point = (fmat @ unit_vector)[:3]*np.array([1280, 720, 0]) + np.array([640, 360, 0])
cv2.line(frame, (640,360), (int(cur_point[0]),int(cur_point[1])), color=MY_COLORS[col_id], thickness=2)
cv2.imwrite("my_rendering.jpg", frame)
print(p1,p2)
if __name__ == '__main__':
physicsClient = p.connect(p.DIRECT)#or p.DIRECT for non-graphical version
#physicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
p.setGravity(0,0,-10)
planeId = p.loadURDF("plane.urdf")
#arrows = p.loadURDF("arrows.urdf")
startPos = [1,0,0.2]
startOrientation = p.getQuaternionFromEuler([0,0,0])
boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
startPos = [0,2,0.2]
boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
#set the center of mass frame (loadURDF sets base link frame) startPos/Ornp.resetBasePositionAndOrientation(boxId, startPos, startOrientation)
for i in range (2400):
if i == 2399:
render()
p.stepSimulation()
p.disconnect()
Here is the result:
Best regards.
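For what it's worth, the textbook OpenGL-style projection can also be written out directly: as far as I can tell PyBullet returns the view and projection matrices as 16 floats in column-major order, so after reshaping them a world point goes through clip space, the perspective divide, and the viewport transform. A small sketch (project_point is my own helper, not part of PyBullet):
import numpy as np

def project_point(point_xyz, view_matrix, proj_matrix, width=1280, height=720):
    # PyBullet hands the matrices back as flat tuples in column-major order
    V = np.array(view_matrix).reshape(4, 4, order='F')
    P = np.array(proj_matrix).reshape(4, 4, order='F')
    world = np.append(np.array(point_xyz, dtype=float), 1.0)   # homogeneous coordinates
    clip = P @ V @ world
    ndc = clip[:3] / clip[3]                                   # perspective divide
    u = (ndc[0] * 0.5 + 0.5) * width
    v = (1.0 - (ndc[1] * 0.5 + 0.5)) * height                  # flip y for image coordinates
    return int(u), int(v)
If that holds, the origin and the three unit vectors could be projected with it and drawn with cv2.line, without the second computeViewMatrixFromYawPitchRoll call.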

Using OpenCV homography with python error in method findHomography

My goal is to do homography on a live video by capturing my screen and processing it.
In order to do so, I took the code from this link, and manipulated it inside a while loop as follows:
from __future__ import print_function
import cv2 as cv
import numpy as np
from windowcapture import WindowCapture
# initialize the WindowCapture class
capture = WindowCapture('My Window')
bar_img = cv.imread('hammer.jpg',cv.IMREAD_GRAYSCALE)
while(True):
# get an updated image of the game
screenshot = capture.get_screenshot()
screenshot = cv.cvtColor(screenshot,cv.IMREAD_GRAYSCALE)
if bar_img is None or screenshot is None:
print('Could not open or find the images!')
exit(0)
#-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
minHessian = 400
detector = cv.SIFT_create()
keypoints_obj, descriptors_obj = detector.detectAndCompute(bar_img, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(screenshot, None)
#-- Step 2: Matching descriptor vectors with a FLANN based matcher
# Since SURF is a floating-point descriptor NORM_L2 is used
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)
#-- Filter matches using the Lowe's ratio test
ratio_thresh = 0.75
good_matches = []
for m,n in knn_matches:
if m.distance < ratio_thresh * n.distance:
good_matches.append(m)
#-- Draw matches
img_matches = np.empty((max(bar_img.shape[0], screenshot.shape[0]), bar_img.shape[1]+screenshot.shape[1], 3), dtype=np.uint8)
cv.drawMatches(bar_img, keypoints_obj, screenshot, keypoints_scene, good_matches, img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
#-- Localize the object
obj = np.empty((len(good_matches),2), dtype=np.float32)
scene = np.empty((len(good_matches),2), dtype=np.float32)
for i in range(len(good_matches)):
#-- Get the keypoints from the good matches
obj[i,0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
obj[i,1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
scene[i,0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
scene[i,1] = keypoints_scene[good_matches[i].trainIdx].pt[1]
H, _ = cv.findHomography(obj, scene, cv.RANSAC)
#-- Get the corners from the image_1 ( the object to be "detected" )
obj_corners = np.empty((4,1,2), dtype=np.float32)
obj_corners[0,0,0] = 0
obj_corners[0,0,1] = 0
obj_corners[1,0,0] = bar_img.shape[1]
obj_corners[1,0,1] = 0
obj_corners[2,0,0] = bar_img.shape[1]
obj_corners[2,0,1] = bar_img.shape[0]
obj_corners[3,0,0] = 0
obj_corners[3,0,1] = bar_img.shape[0]
scene_corners = cv.perspectiveTransform(obj_corners, H)
#-- Draw lines between the corners (the mapped object in the scene - image_2 )
cv.line(img_matches, (int(scene_corners[0,0,0] + bar_img.shape[1]), int(scene_corners[0,0,1])),\
(int(scene_corners[1,0,0] + bar_img.shape[1]), int(scene_corners[1,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[1,0,0] + bar_img.shape[1]), int(scene_corners[1,0,1])),\
(int(scene_corners[2,0,0] + bar_img.shape[1]), int(scene_corners[2,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[2,0,0] + bar_img.shape[1]), int(scene_corners[2,0,1])),\
(int(scene_corners[3,0,0] + bar_img.shape[1]), int(scene_corners[3,0,1])), (0,255,0), 4)
cv.line(img_matches, (int(scene_corners[3,0,0] + bar_img.shape[1]), int(scene_corners[3,0,1])),\
(int(scene_corners[0,0,0] + bar_img.shape[1]), int(scene_corners[0,0,1])), (0,255,0), 4)
#-- Show detected matches
cv.imshow('Good Matches & Object detection', img_matches)
cv.waitKey()
if cv.waitKey(1) == ord('q'):
cv.destroyAllWindows()
break
print('Done.')
The WindowCapture class that I used relies on win32gui to capture the window (maybe it makes a difference that I capture it this way instead of using imread?).
I get the following error when I run the code:
C:\Users\Tester\AppData\Local\Temp\pip-req-build-1i5nllza\opencv\modules\calib3d\src\fundam.cpp:385: error: (-28:Unknown error code -28) The input arrays should have at least 4 corresponding point sets to calculate Homography in function 'cv::findHomography'
Any idea why it happens?
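Reading the error again, it says findHomography needs at least 4 corresponding point pairs, so one guard I could add inside the while loop is to skip frames where the ratio test leaves fewer than 4 good matches. A minimal sketch of that guard (it would avoid the crash, even if it does not explain why so few matches survive on some frames):
MIN_MATCH_COUNT = 4   # cv.findHomography needs at least 4 point pairs

if len(good_matches) < MIN_MATCH_COUNT:
    print('Not enough good matches on this frame: %d' % len(good_matches))
    continue                                   # skip this frame

H, _ = cv.findHomography(obj, scene, cv.RANSAC)
if H is None:
    continue                                   # RANSAC may also fail to fit a model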

Python to Arduino through Serial, can’t move servos

I'm currently trying to have a camera, mounted on a pan-tilt made of two micro servos, track a face. My Python code has been working and has been successfully identifying a face, but none of my servos move, while the Arduino is constantly flashing as if it is receiving input from the Python code. I haven't been able to get the servos to move according to my Python code, but I have written simple test code on the side to make sure the servos have good connections, and they work fine on their own. I'm not sure what is wrong...
Python Code
import numpy as np
import serial
import time
import sys
import cv2
arduino = serial.Serial('COM3', 9600)
time.sleep(2)
print("Connection to arduino...")
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
while 1:
ret, img = cap.read()
cv2.resizeWindow('img', 500,500)
cv2.line(img,(500,250),(0,250),(0,255,0),1)
cv2.line(img,(250,0),(250,500),(0,255,0),1)
cv2.circle(img, (250, 250), 5, (255, 255, 255), -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),5)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
arr = {y:y+h, x:x+w}
print (arr)
print ('X :' +str(x))
print ('Y :'+str(y))
print ('x+w :' +str(x+w))
print ('y+h :' +str(y+h))
xx = int(x+(x+h))/2
yy = int(y+(y+w))/2
print (xx)
print (yy)
center = (xx,yy)
print("Center of Rectangle is :", center)
data = ("X {0: f} Y {1: f} Z".format(xx, yy))
print ("output = '" +data+ "'")
arduino.write(data.encode())
cv2.imshow('img',img)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
Arduino Code
#include<Servo.h>
Servo servoVer; //Vertical Servo
Servo servoHor; //Horizontal Servo
int x;
int y;
int prevX;
int prevY;
void setup()
{
Serial.begin(9600);
servoVer.attach(5); //Vertical Servo to Pin 5
servoHor.attach(6); //Horizontal Servo to Pin 6
servoVer.write(90);
servoHor.write(90);
}
void Pos()
{
if(prevX != x || prevY != y)
{
int servoX = map(x, 600, 0, 70, 179);
int servoY = map(y, 450, 0, 179, 95);
servoX = min(servoX, 179);
servoX = max(servoX, 70);
servoY = min(servoY, 179);
servoY = max(servoY, 95);
servoHor.write(servoX);
servoVer.write(servoY);
}
}
void loop()
{
if(Serial.available() > 0)
{
if(Serial.read() == 'X')
{
x = Serial.parseInt();
if(Serial.read() == 'Y')
{
y = Serial.parseInt();
Pos();
}
}
while(Serial.available() > 0)
{
Serial.read();
}
}
}
One huge problem is the way you are using Serial.read. That function consumes the character out of the buffer. You don't get to read the same one twice. So let's say you send a 'Y'. The first if statement reads the Y out of the serial buffer and compares to 'X', that's false so it moves on. Then it reads something else from serial, probably a -1 if nothing is left to read. But it doesn't see the 'Y' because that was read in the first if.
What you need to do is to read from serial into a char variable and then use that char variable in your if statements.
char c = Serial.read();
if(c == 'X')...
... if (c == 'Y')...
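On the Python side it may also help to frame the message as plain integers with a terminator, so parseInt on the Arduino sees clean numbers and a clear end of each reading; a minimal sketch (the exact framing is an assumption and has to match whatever the Arduino code parses):
import serial
import time

arduino = serial.Serial('COM3', 9600)
time.sleep(2)                          # give the board time to reset after the port opens

def send_center(x, y):
    # e.g. b"X320Y240\n": whole numbers only, one reading per line
    arduino.write("X{}Y{}\n".format(int(x), int(y)).encode())

send_center(320, 240)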

python pyparrot image processing question

I'm trying to build code that flies a drone with a camera, using demoMamboVisionGUI.py below. When the code is executed, the camera screen comes up, and pressing the button starts the flight. The first script below displays four camera windows and detects straight lines while isolating a specified color value, blue (BGR2HSV). Using these two scripts, the camera recognizes the blue straight line, flies forward little by little, turns left and right at a certain angle, and when it recognizes the specified color (red) at the bottom it lands and starts flying again with another button. I want to make the code recognize green and land. I would appreciate it if you could give me a simple hint.
import cv2
import numpy as np
def im_trim(img):
x = 160
y = 50
w = 280
h = 180
img_trim = img[y:y + h, x:x + w]
return img_trim
def go():
minimum = 9999;
min_theta=0;
try:
cap = cv2.VideoCapture(0)
except:
return
while True:
ret, P = cap.read()
img1 = P
cv2.imshow('asdf',img1)
img_HSV = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
img_h, img_s, img_v = cv2.split(img_HSV)
cv2.imshow("HSV", img_HSV)
lower_b = np.array([100, 80, 100])
upper_b = np.array([120, 255, 255])
blue = cv2.inRange(img_HSV, lower_b, upper_b)
cv2.imshow('root',blue)
edges = cv2.Canny(blue, 50, 150, apertureSize =3)
lines = cv2.HoughLines(edges, 1, np.pi/180, threshold = 100)
if lines is not None:
for line in lines:
r, theta = line[0]
#if (r<minimum and r>0) and (np.rad2deg(theta)>-90 and np.rad2deg(theta)<90):
#minimum = r
#min_theta = theta
#if (r > 0 and r < 250) and (np.rad2deg(theta) > 170 or np.rad2deg(theta) < 10):
# self.drone_object.fly_direct(pitch=0, roll=-7, yaw=0, vertical_movement=0,
# duration=1)
#print("right")
#elif (r > 400 and r < 650) and (np.rad2deg(theta) > 170 or np.rad2deg(theta) < 10):
# self.drone_object.fly_direct(pitch=0, roll=7, yaw=0, vertical_movement=0,
# duration=1)
print(r, np.rad2deg(theta))
#The if-blocks below are repeated by the while loop to straighten the path before proceeding
#if np.rad2deg(min_theta) >= (some angle) or <= (some angle):
#    below -> turn left, above -> turn right, in between -> go straight
a = np.cos(theta)
b = np.sin(theta)
x0 = a * r
y0 = b * r
x1 = int(x0 + 1000 * (-b))
y1 = int(y0 + 1000 * a)
x2 = int(x0 - 1000 * (-b))
y2 = int(y0 - 1000 * a)
cv2.line(img1, (x1,y1), (x2,y2), (0,255,0), 3)
cv2.imshow('hough',img1)
k = cv2.waitKey(1)
if k == 27:
break
cv2.destroyAllWindows()
if __name__ == "__main__":
go()
print("??")
================================================================================================
"""
Demo of the Bebop vision using DroneVisionGUI that relies on libVLC. It is a different
multi-threaded approach than DroneVision
Author: Amy McGovern
"""
from pyparrot.Minidrone import Mambo
from pyparrot.DroneVisionGUI import DroneVisionGUI
import cv2
# set this to true if you want to fly for the demo
testFlying = True
class UserVision:
def __init__(self, vision):
self.index = 0
self.vision = vision
def save_pictures(self, args):
# print("in save pictures on image %d " % self.index)
img = self.vision.get_latest_valid_picture()
if (img is not None):
filename = "test_image_%06d.png" % self.index
# uncomment this if you want to write out images every time you get a new one
#cv2.imwrite(filename, img)
self.index +=1
#print(self.index)
def demo_mambo_user_vision_function(mamboVision, args):
"""
Demo the user code to run with the run button for a mambo
:param args:
:return:
"""
mambo = args[0]
if (testFlying):
print("taking off!")
mambo.safe_takeoff(5)
if (mambo.sensors.flying_state != "emergency"):
print("flying state is %s" % mambo.sensors.flying_state)
print("Flying direct: going up")
mambo.fly_direct(roll=0, pitch=0, yaw=0, vertical_movement=15, duration=2)
print("flip left")
print("flying state is %s" % mambo.sensors.flying_state)
success = mambo.flip(direction="left")
print("mambo flip result %s" % success)
mambo.smart_sleep(5)
print("landing")
print("flying state is %s" % mambo.sensors.flying_state)
mambo.safe_land(5)
else:
print("Sleeeping for 15 seconds - move the mambo around")
mambo.smart_sleep(15)
# done doing vision demo
print("Ending the sleep and vision")
mamboVision.close_video()
mambo.smart_sleep(5)
print("disconnecting")
mambo.disconnect()
if __name__ == "__main__":
# you will need to change this to the address of YOUR mambo
mamboAddr = "B0:FC:36:F4:37:F9"
# make my mambo object
# remember to set True/False for the wifi depending on if you are using the wifi or the BLE to connect
mambo = Mambo(mamboAddr, use_wifi=True)
print("trying to connect to mambo now")
success = mambo.connect(num_retries=3)
print("connected: %s" % success)
if (success):
# get the state information
print("sleeping")
mambo.smart_sleep(1)
mambo.ask_for_state_update()
mambo.smart_sleep(1)
print("Preparing to open vision")
mamboVision = DroneVisionGUI(mambo, is_bebop=False, buffer_size=200,
user_code_to_run=demo_mambo_user_vision_function, user_args=(mambo, ))
userVision = UserVision(mamboVision)
mamboVision.set_user_callback_function(userVision.save_pictures, user_callback_args=None)
mamboVision.open_video()
==========================================================================
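What I have in mind, roughly: isolate green the same way the first script isolates blue, just with a different HSV range, and put the check in the vision callback so the drone lands when enough green is visible. The HSV bounds and the 20 percent threshold below are guesses I would still need to tune, and mambo / mamboVision are the objects created above:
import cv2
import numpy as np

class LandOnGreen:
    def __init__(self, vision, mambo):
        self.vision = vision
        self.mambo = mambo

    def check_green(self, args):
        img = self.vision.get_latest_valid_picture()
        if img is None:
            return
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array([40, 80, 80]), np.array([80, 255, 255]))
        if cv2.countNonZero(mask) > 0.2 * mask.size:    # "enough" green in view
            self.mambo.safe_land(5)

# wired up the same way as UserVision above:
# landOnGreen = LandOnGreen(mamboVision, mambo)
# mamboVision.set_user_callback_function(landOnGreen.check_green, user_callback_args=None)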
