Subscribing to a ROS sensor_msgs/Image using Python

I'm trying to subscribe to a ROS topic published by a V-REP vision sensor. Here is my code, which works fine when using my built-in webcam:
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
import tensorflow as tf
import classify_image

class RosTensorFlow():
    def __init__(self):
        classify_image.maybe_download_and_extract()
        self._session = tf.Session()
        classify_image.create_graph()
        self._cv_bridge = CvBridge()
        self._sub = rospy.Subscriber('image', Image, self.callback, queue_size=1)
        self._pub = rospy.Publisher('result', String, queue_size=1)
        self.score_threshold = rospy.get_param('~score_threshold', 0.1)
        self.use_top_k = rospy.get_param('~use_top_k', 5)

    def callback(self, image_msg):
        try:
            cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, desired_encoding="passthrough")
        except CvBridgeError as e:
            print(e)
        # copied from classify_image.py
        image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
        # Creates graph from saved GraphDef.
        softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
        predictions = self._session.run(
            softmax_tensor, {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)
        # Creates node ID --> English string lookup.
        node_lookup = classify_image.NodeLookup()
        top_k = predictions.argsort()[-self.use_top_k:][::-1]
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            if score > self.score_threshold:
                rospy.loginfo('%s (score = %.5f)' % (human_string, score))
                self._pub.publish(human_string)

    def main(self):
        rospy.spin()

if __name__ == '__main__':
    classify_image.setup_args()
    rospy.init_node('rostensorflow')
    tensor = RosTensorFlow()
    tensor.main()
and my non-threaded child script in V-REP, which I basically copied from the rosInterfaceTopicPublisherAndSubscriber.ttt tutorial:
function sysCall_init()
    activeVisionSensor=sim.getObjectHandle('Vision_sensor')
    pub=simROS.advertise('image', 'sensor_msgs/Image')
    simROS.publisherTreatUInt8ArrayAsString(pub) -- treat uint8 arrays as strings (much faster, tables/arrays are kind of slow in Lua)
end

function sysCall_sensing()
    -- Publish the image of the active vision sensor:
    local data,w,h=sim.getVisionSensorCharImage(activeVisionSensor)
    d={}
    d['header']={seq=0,stamp=simROS.getTime(), frame_id="a"}
    d['height']=h
    d['width']=w
    d['encoding']='rgb8'
    d['is_bigendian']=1
    d['step']=w*3
    d['data']=data
    simROS.publish(pub,d)
    print("published")
end

function sysCall_cleanup()
    simROS.shutdownPublisher(pub)
end
I'm running the script with:
python image_recognition.py
I'm getting no error message, but there is just no output. Can you please give me a hint about what I need to change? I did a lot of research, but without any success so far.

Got it working.
For people interested, I was running:
rostopic list -v
which told me that the topic was actually published as:
/vrep_ros_interface/image
I just changed that in the rospy.Subscriber call and the script worked :)

Why is my Python Subscriber/Publisher node not publishing anything?

The instructions for the question are below.
Question report 2.7. You are now ready to make a ROS node to perform real-time extraction of the centroid of the line from the images acquired by the camera of your robot.
File name: image_segment_node.py
Subscribes to topics:
• raspicam_node/image (type sensor_msgs/Image)
Publishes to topics:
• /image/segmented (type sensor_msgs/Image)
• /image/centroid (type geometry_msgs/PointStamped)
Description: This node should import the file image_processing.py to use all the functions that you have developed so far. For each new image received from the camera, the node should perform the same operations described in Question provided 2.3, but without visualization of the results. Instead, the node should:
• Publish the segmented image with the line on the topic /image/segmented.
• Insert the computed horizontal centroid in the x field of a PointStamped message, add the current time to the header.stamp field, and then publish the message on the topic /image/centroid.
The code I have for image_segment_node.py is below:
#!/usr/bin/env python
"""This is a node to perform real-time extraction of the centroid of the line from images acquired by camera of robot and export data."""
import rospy
import numpy as np
import image_processing as ip
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from geometry_msgs.msg import PointStamped
import cv2

def callback(msg):
    global pub1
    global pub2
    rospy.loginfo("started")
    img = CvBridge.imgmsg_to_cv2(msg, 'bgr8')
    img = cv2.imread(img)
    lb, ub = ip.classifier_parameters()
    img_seg = cv2.inRange(img, lb, ub)
    x = ip.image_centroid_horizontal(img_seg)
    img_seg = ip.image_one_to_three_channels(img_seg)
    img_seg = ip.image_line_vertical(img_seg, x)
    pub1.publish(img_seg)
    point1 = PointStamped()
    point1.point = x
    point1.header = rospy.Time.now()
    pub2.publish(point1)

def main():
    global pub1
    global pub2
    rospy.init_node('image_segment_node')
    rospy.Subscriber("/raspicam_node/image", Image, callback=callback, queue_size=1)
    pub1 = rospy.Publisher("/image/segmented", Image, queue_size=10)
    pub2 = rospy.Publisher("/image/centroid", PointStamped, queue_size=10)
    rate = rospy.Rate(10)
    # rospy.spin()

if __name__ == '__main__':
    try:
        main()
    finally:
        pass
The terminal tells me this:
drand21#roslab:~/ros_ws/src/me416_lab/nodes$ rostopic echo /image/centroid
WARNING: topic [/image/centroid] does not appear to be published yet
Can you try the code below? I changed only the structure. There doesn't seem to be any problem with it, but the warning is clearly about "pub2" not publishing.
#!/usr/bin/env python
"""This is a node to perform real-time extraction of the centroid of the line from images acquired by camera of robot and export data."""
import rospy
import numpy as np
import image_processing as ip
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from geometry_msgs.msg import PointStamped
import cv2

class image_segment():
    # Class function for creating map relations
    def __init__(self):
        rospy.init_node('image_segment_node')
        self.rate = rospy.Rate(10)
        # Publishers
        self.pub1 = rospy.Publisher("/image/segmented", Image, queue_size=10)
        self.pub2 = rospy.Publisher("/image/centroid", PointStamped, queue_size=10)
        # Subscribers
        rospy.Subscriber("/raspicam_node/image", Image, callback=self.callback_im, queue_size=1)

    def callback_im(self, msg):
        rospy.loginfo("started")
        img = CvBridge.imgmsg_to_cv2(msg, 'bgr8')
        img = cv2.imread(img)
        lb, ub = ip.classifier_parameters()
        img_seg = cv2.inRange(img, lb, ub)
        x = ip.image_centroid_horizontal(img_seg)
        img_seg = ip.image_one_to_three_channels(img_seg)
        img_seg = ip.image_line_vertical(img_seg, x)
        self.pub1.publish(img_seg)
        point1 = PointStamped()
        point1.point = x
        point1.header = rospy.Time.now()
        self.pub2.publish(point1)

    def main(self):
        while not rospy.is_shutdown():
            ###
            self.rate.sleep()

if __name__ == '__main__':
    try:
        im_class = image_segment()
        im_class.main()
    finally:
        pass
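One concrete bug in both versions above: imgmsg_to_cv2 is called on the CvBridge class itself rather than on an instance, so the callback fails before anything reaches pub1 or pub2 (rospy logs callback exceptions instead of crashing, which makes this easy to miss). A minimal sketch of the fix:

# In __init__: create one bridge instance and reuse it
self.bridge = CvBridge()

# In callback_im: call the conversion on the instance
img = self.bridge.imgmsg_to_cv2(msg, 'bgr8')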

Is there an error with my publisher and subscriber? Nothing is being published

My node for ROS is supposed to subscribe to topic "'raspicam_node/image' (type sensor_msgs/Image)", and publish to both topics "'/image/segmented' (type sensor_msgs/Image)" and "'/image/centroid' (type geometry_msgs/PointStamped)".
Thank you for any and all help.
Other instructions for the question are:
"This node should import the file image_processing.py to use all the functions that you have developed so far. For each new image received from the camera, the node should perform the same operations described in Question provided 2.3, but without visualization of the results. Instead, the node should:
• Publish the segmented image with the line on the topic /image/segmented.
• Insert the computed horizontal centroid in the x field of a PointStamped message, add the current time to the header.stamp field, and then publish the message on the topic /image/centroid."
#!/usr/bin/env python
"""This is a node to perform real-time extraction of the centroid of the line from images acquired by camera of robot and export data."""
import rospy
import numpy as np
import image_processing as ip
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from geometry_msgs.msg import PointStamped
import cv2

class ImageSegmentNode(object):
    def __init__(self):
        rospy.Subscriber("/raspicam_node/image", Image, callback=self.callback)
        self.pub_r1 = rospy.Publisher("/image/segmented", Image, queue_size=10)
        self.pub_r2 = rospy.Publisher("/image/centroid", PointStamped, queue_size=10)
        self.bridge = CvBridge()

    def callback(self, msg):
        img = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
        img = cv2.imread(img)
        lb, ub = ip.classifier_parameters()
        img_seg = cv2.inRange(img, lb, ub)
        x = ip.image_centroid_horizontal(img_seg)
        img_seg = ip.image_one_to_three_channels(img_seg)
        img_seg = ip.image_line_vertical(img_seg, x)
        self.pub_r1.publish(img_seg)
        point1 = PointStamped()
        point1.point = x
        point1.header = rospy.Time.now()
        self.pub_r2.publish(point1)

if __name__ == '__main__':
    rospy.init_node('image_segment_node')
    isn = ImageSegmentNode()
    rospy.loginfo("Node has been started.")
    rospy.spin()
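Beyond the structure, the callback has three issues that would stop both topics from publishing correctly: cv2.imread expects a file path, but img here is already a NumPy image, so the call returns None; the OpenCV array has to be converted back to a sensor_msgs/Image before publishing; and the centroid and timestamp belong in sub-fields of the PointStamped message. A sketch of a corrected callback, assuming the ip helpers behave as the assignment describes:

def callback(self, msg):
    img = self.bridge.imgmsg_to_cv2(msg, 'bgr8')  # already an image; no cv2.imread needed
    lb, ub = ip.classifier_parameters()
    img_seg = cv2.inRange(img, lb, ub)
    x = ip.image_centroid_horizontal(img_seg)
    img_seg = ip.image_one_to_three_channels(img_seg)
    img_seg = ip.image_line_vertical(img_seg, x)
    # convert the OpenCV array back to a ROS message before publishing
    self.pub_r1.publish(self.bridge.cv2_to_imgmsg(img_seg, 'bgr8'))
    point1 = PointStamped()
    point1.point.x = x                      # centroid goes in the point's x field
    point1.header.stamp = rospy.Time.now()  # timestamp goes in header.stamp
    self.pub_r2.publish(point1)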

must have same datatype, got tf.uint8 != tf.float32 (behavioral cloning simulator)

While working on the car behavioral cloning project, I successfully trained my model and generated the file model-001.h5.
Now, in order to test the model, it must successfully drive the vehicle in the Udacity self-driving simulator. This is done by running drive.py with the generated model-001.h5 file:
python drive.py model-001.h5
However, I receive this error:
x and y must have the same dtype, got tf.uint8 != tf.float32
x and y must have the same dtype, got tf.uint8 != tf.float32
x and y must have the same dtype, got tf.uint8 != tf.float32
I don't know if this error comes from drive.py, model.py, or utils.py, but here is the drive.py code:
import argparse
import base64
from datetime import datetime
import os
import shutil

import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO

from tensorflow.keras.models import load_model

import utils

sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None

MAX_SPEED = 25.0
MIN_SPEED = 10.0
speed_limit = MAX_SPEED

@sio.on('telemetry')
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        steering_angle = float(data["steering_angle"])
        # The current throttle of the car
        throttle = float(data["throttle"])
        # The current speed of the car
        speed = float(data["speed"])
        # The current image from the center camera of the car
        image = Image.open(BytesIO(base64.b64decode(data["image"])))
        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
        try:
            image = np.asarray(image)        # from PIL image to numpy array
            image = utils.preprocess(image)  # apply the preprocessing
            image = np.array([image])        # the model expects 4D array

            # predict the steering angle for the image
            steering_angle = float(model.predict(image, batch_size=1))
            # lower the throttle as the speed increases
            # if the speed is above the current speed limit, we are on a downhill.
            # make sure we slow down first and then go back to the original max speed.
            global speed_limit
            if speed > speed_limit:
                speed_limit = MIN_SPEED  # slow down
            else:
                speed_limit = MAX_SPEED
            throttle = 1.0 - steering_angle**2 - (speed/speed_limit)**2

            print('{} {} {}'.format(steering_angle, throttle, speed))
            send_control(steering_angle, throttle)
        except Exception as e:
            print(e)
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)

@sio.on('connect')
def connect(sid, environ):
    print("connect ", sid)
    send_control(0, 0)

def send_control(steering_angle, throttle):
    sio.emit(
        "steer",
        data={
            'steering_angle': steering_angle.__str__(),
            'throttle': throttle.__str__()
        },
        skip_sid=True)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument(
        'model',
        type=str,
        help='Path to model h5 file. Model should be on the same path.'
    )
    parser.add_argument(
        'image_folder',
        type=str,
        nargs='?',
        default='',
        help='Path to image folder. This is where the images from the run will be saved.'
    )
    args = parser.parse_args()

    model = load_model(args.model)

    if args.image_folder != '':
        print("Creating image folder at {}".format(args.image_folder))
        if not os.path.exists(args.image_folder):
            os.makedirs(args.image_folder)
        else:
            shutil.rmtree(args.image_folder)
            os.makedirs(args.image_folder)
        print("RECORDING THIS RUN ...")
    else:
        print("NOT RECORDING THIS RUN ...")

    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)

    # deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
even though telemetry(sid, data) has an if statement that converts the incoming data to float format.
It'd be helpful to see on what line you get the error.
It's not obvious to me where the "if statement to change the incoming data to float format" that you refer to is. I see that the model is called on whatever image is loaded, in whatever datatype it arrives. My guess is that the image is stored as uint8, the most efficient storage for a (0, 255) colour representation.
So try:
image = np.asarray(image, dtype=np.float32)
to create an array of the correct datatype for your model, which I presume is tf.float32.
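A minimal, self-contained illustration of the cast (the 66x200x3 shape is only an example, not necessarily your model's input size):

import numpy as np

img_uint8 = np.zeros((66, 200, 3), dtype=np.uint8)   # raw (0, 255) pixel data
img_float = np.asarray(img_uint8, dtype=np.float32)  # the dtype the model expects
print(img_uint8.dtype, '->', img_float.dtype)        # uint8 -> float32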

Invalid pointer error while running Python in C++ using pybind11 and PyTorch

While running the following Python code in C++ using pybind11 and PyTorch 1.6.0, I get an "Invalid Pointer" error. In Python, the code runs successfully without any error. What's the reason? How can I solve this problem?
import torch
import torch.nn.functional as F
import numpy as np
import cv2
import torchvision
import eval_widerface
import torchvision_model

def resize(image, size):
    image = F.interpolate(image.unsqueeze(0), size=size, mode="nearest").squeeze(0)
    return image

# define constants
model_path = '/path/to/model.pt'
image_path = '/path/to/image_pad.jpg'
scale = 1.0  # Image resize scale (2 for half size)
font = cv2.FONT_HERSHEY_SIMPLEX
MIN_SCORE = 0.9

image_bgr = cv2.imread(image_path)
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)  # skimage.io.imread(args.image_path)
cv2.imshow("input image", image_bgr)
cv2.waitKey()
cv2.destroyAllWindows()

# load pre-trained model
return_layers = {'layer2': 1, 'layer3': 2, 'layer4': 3}
RetinaFace = torchvision_model.create_retinaface(return_layers)
print('RetinaFace.state_dict().')
retina_dict = RetinaFace.state_dict()
The following function generates the error:
def create_retinaface(return_layers, backbone_name='resnet50', anchors_num=3, pretrained=True):
    print('In create_retinaface.')
    print(resnet.__dict__)
    backbone = resnet.__dict__[backbone_name](pretrained=pretrained)
    print('backbone.')
    # freeze layer1
    for name, parameter in backbone.named_parameters():
        print('freeze layer 1.')
        # if 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
        #     parameter.requires_grad_(False)
        if name == 'conv1.weight':
            # print('freeze first conv layer...')
            parameter.requires_grad_(False)
    model = RetinaFace(backbone, return_layers, anchor_nums=3)
    return model
The statement backbone = resnet.__dict__[backbone_name](pretrained=pretrained) generates an error that looks like this:
*** Error in `./p': munmap_chunk(): invalid pointer: 0x00007f4461866db0 ***
======= Backtrace: =========
/usr/lib64/libc.so.6(+0x7f3e4)[0x7f44736b43e4]
/usr/local/lib64/libopencv_gapi.so.4.1(_ZNSt10_HashtableISsSsSaISsENSt8__detail9_IdentityESt8equal_toISsESt4hashISsENS1_18_Mod_range_hashingENS1_20_Default_ranged_hashENS1_20_Prime_rehash_policyENS1_17_Hashtable_traitsILb1ELb1ELb1EEEE21_M_insert_unique_nodeEmmPNS1_10_Hash_nodeISsLb1EEE+0xc9)[0x7f4483dee1a9]
/home/20face/.virtualenvs/torch/lib64/python3.6/site-packages/torch/lib/libtorch_python.so(+0x4403b5)[0x7f4460bb73b5]
/home/20face/.virtualenvs/torch/lib64/python3.6/site-packages/torch/lib/libtorch_python.so(+0x44570a)[0x7f4460bbc70a]
/home/20face/.virtualenvs/torch/lib64/python3.6/site-packages/torch/lib/libtorch_python.so(+0x275b20)[0x7f44609ecb20]
/usr/lib64/libpython3.6m.so.1.0(_PyCFunction_FastCallDict+0x147)[0x7f4474307167]
/usr/lib64/libpython3.6m.so.1.0(+0x1507df)[0x7f44743727df]
/usr/lib64/libpython3.6m.so.1.0(_PyEval_EvalFrameDefault+0x3a7)[0x7f44743670f7]
/usr/lib64/libpython3.6m.so.1.0(+0x1505ca)[0x7f44743725ca]
/usr/lib64/libpython3.6m.so.1.0(+0x150903)[0x7f4474372903]
/usr/lib64/libpython3.6m.so.1.0(_PyEval_EvalFrameDefault+0x3a7)[0x7f44743670f7]
/usr/lib64/libpython3.6m.so.1.0(+0x14fb69)[0x7f4474371b69]
/usr/lib64/libpython3.6m.so.1.0(_PyFunction_FastCallDict+0x24f)[0x7f44743739ff]
/usr/lib64/libpython3.6m.so.1.0(_PyObject_FastCallDict+0x10e)[0x7f44742ca1de]
/usr/lib64/libpython3.6m.so.1.0(_PyObject_Call_Prepend+0x61)[0x7f44742ca2f1]
/usr/lib64/libpython3.6m.so.1.0(PyObject_Call+0x43)[0x7f44742c9f63]
/usr/lib64/libpython3.6m.so.1.0(+0xfa7e5)[0x7f447431c7e5]
/usr/lib64/libpython3.6m.so.1.0(+0xf71e2)[0x7f44743191e2]
/usr/lib64/libpython3.6m.so.1.0(PyObject_Call+0x43)[0x7f44742c9f63]
/usr/lib64/libpython3.6m.so.1.0(_PyEval_EvalFrameDefault+0x2067)[0x7f4474368db7]
/usr/lib64/libpython3.6m.so.1.0(PyEval_EvalCodeEx+0x24f)[0x7f4474372c9f]
This line is causing the error because it assumes __dict__ has a backbone_name element:
backbone = resnet.__dict__[backbone_name](pretrained=pretrained)
When that isn't the case, it ends up accessing invalid memory. Check __dict__ with an if statement, or make sure it contains the backbone_name key, before trying to use it.
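A minimal sketch of that guard (assuming resnet here is torchvision.models.resnet):

# Fail loudly in Python if the backbone name is missing, instead of letting
# a bad lookup propagate into the C++ layer.
if backbone_name in resnet.__dict__:
    backbone = resnet.__dict__[backbone_name](pretrained=pretrained)
else:
    raise ValueError('Unknown backbone: {}'.format(backbone_name))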

How to classify images using Spark and Caffe

I am using Caffe to do image classification on Mac OS X, with Python.
Right now I know how to classify a list of images using Caffe in plain Python, but I want to make it faster by using Spark.
Therefore, I tried to apply the image classification to each element of an RDD created from a list of image paths. However, Spark does not allow me to do so.
Here is my code:
This is the code for image classification:
# display image name, class number, predicted label
def classify_image(image_path, transformer, net):
    image = caffe.io.load_image(image_path)
    transformed_image = transformer.preprocess('data', image)
    net.blobs['data'].data[...] = transformed_image
    output = net.forward()
    output_prob = output['prob'][0]
    pred = output_prob.argmax()
    labels_file = caffe_root + 'data/ilsvrc12/synset_words.txt'
    labels = np.loadtxt(labels_file, str, delimiter='\t')
    lb = labels[pred]
    image_name = image_path.split(images_folder_path)[1]
    result_str = 'image: ' + image_name + ' prediction: ' + str(pred) + ' label: ' + lb
    return result_str
This is the code that generates the Caffe parameters and applies the classify_image method to each element of the RDD:
def main():
    sys.path.insert(0, caffe_root + 'python')
    caffe.set_mode_cpu()
    model_def = caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt'
    model_weights = caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
    net = caffe.Net(model_def,
                    model_weights,
                    caffe.TEST)
    mu = np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy')
    mu = mu.mean(1).mean(1)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2,0,1))
    transformer.set_mean('data', mu)
    transformer.set_raw_scale('data', 255)
    transformer.set_channel_swap('data', (2,1,0))
    net.blobs['data'].reshape(50,
                              3,
                              227, 227)
    image_list = []
    for image_path in glob.glob(images_folder_path + '*.jpg'):
        image_list.append(image_path)
    images_rdd = sc.parallelize(image_list)
    transformer_bc = sc.broadcast(transformer)
    net_bc = sc.broadcast(net)
    image_predictions = images_rdd.map(lambda image_path: classify_image(image_path, transformer_bc, net_bc))
    print image_predictions

if __name__ == '__main__':
    main()
As you can see, I tried to broadcast the Caffe objects: transformer_bc = sc.broadcast(transformer) and net_bc = sc.broadcast(net).
The error is:
RuntimeError: Pickling of "caffe._caffe.Net" instances is not enabled
Before I added the broadcast, the error was:
Driver stacktrace.... Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):....
So, do you know of any way I can classify images using Caffe while still taking advantage of Spark?
When you work with complex, non-native objects, initialization has to be moved directly to the workers, for example with a singleton module:
net_builder.py:
import caffe

net = None

def build_net(*args, **kwargs):
    ...  # Initialize net here
    return net

def get_net(*args, **kwargs):
    global net
    if net is None:
        net = build_net(*args, **kwargs)
    return net
main.py:
import net_builder

sc.addPyFile("net_builder.py")

def classify_image(image_path, transformer, *args, **kwargs):
    net = net_builder.get_net(*args, **kwargs)
It means you'll have to distribute all required files as well. It can be done either manually or using the SparkFiles mechanism.
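Wired into the original main(), the map then builds the net lazily on each executor instead of shipping it through a broadcast. A sketch, assuming classify_image is reworked as above and that build_net accepts the model paths (model_def and model_weights are hypothetical arguments here):

# Nothing unpicklable crosses the driver/worker boundary: each executor
# calls net_builder.get_net once and caches its own Net instance.
image_predictions = images_rdd.map(
    lambda p: classify_image(p, transformer,
                             model_def=model_def,
                             model_weights=model_weights))
print(image_predictions.collect())  # collect() forces the lazy map to actually run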
On a side note, you should take a look at the SparkNet package.
