I'm currently working with Detectron2 for people detection in videos. I've been trying to run the following code to read a video file, make predictions frame by frame, and record a video with the processed frames, but I am getting an empty video file. The environment I've created for this is in Colaboratory and has the following versions (Python 3.6, OpenCV 4.2.30).
I'm new to this, but I would truly appreciate it if someone could give me an idea.
This is the code:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import cv2
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import time
cap = cv2.VideoCapture('piloto legger 1.mp4')
hasFrame, frame = cap.read()
FPS = cap.get(cv2.CAP_PROP_FPS)
frame_width = frame.shape[1]
frame_height = frame.shape[0]
video_writer = cv2.VideoWriter('out.mp4', cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), FPS, (frame_width, frame_height))
while cv2.waitKey(1) < 0:
    hasFrame, frame = cap.read()
    if not hasFrame:
        cv2.waitKey()
        break
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set threshold for this model
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
    predictor = DefaultPredictor(cfg)
    outputs = predictor(frame)
    v = Visualizer(frame[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    imagen = v.get_image()[:, :, ::-1]
    cv2.imwrite('POSE detectron2.png', imagen)
    video_writer.write(imagen)
cap.release()
video_writer.release()
cv2.destroyAllWindows()
I used your code as a starting point, and took some ideas from the Detectron2 examples in order to make it work.
The problem seems to have been something with the fourcc argument of the VideoWriter, but it may also have been related to your code using Visualizer instead of VideoVisualizer (and with a scale of 1.2, which made the image the wrong size for the VideoWriter).
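As an aside, a quick way to catch this class of problem is to check that the writer actually opened and that each frame matches the frameSize it was created with, since a mismatch makes VideoWriter silently produce an empty file. A minimal sketch, with assumed dimensions and an assumed output name:
import cv2
import numpy as np

# sanity check: VideoWriter writes nothing when it cannot open the output
# container or when a frame's size does not match frameSize
size = (1280, 720)  # (width, height) -- placeholder values for illustration
writer = cv2.VideoWriter('check.mp4', cv2.VideoWriter_fourcc(*"mp4v"), 30.0, size)
assert writer.isOpened(), "VideoWriter failed to open; try a different fourcc/container"

frame = np.zeros((size[1], size[0], 3), dtype=np.uint8)  # dummy BGR frame
assert (frame.shape[1], frame.shape[0]) == size, "frame size must match frameSize"
writer.write(frame)
writer.release()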
The code below works for me (and is also a lot faster, as the predictor and visualizer are defined outside of the loop):
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import tqdm
import cv2
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2.data import MetadataCatalog
import time
# Extract video properties
video = cv2.VideoCapture('video-input.mp4')
width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
frames_per_second = video.get(cv2.CAP_PROP_FPS)
num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
# Initialize video writer
video_writer = cv2.VideoWriter('out.mp4', fourcc=cv2.VideoWriter_fourcc(*"mp4v"), fps=float(frames_per_second), frameSize=(width, height), isColor=True)
# Initialize predictor
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
predictor = DefaultPredictor(cfg)
# Initialize visualizer
v = VideoVisualizer(MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), ColorMode.IMAGE)
def runOnVideo(video, maxFrames):
    """ Runs the predictor on every frame in the video (unless maxFrames is given),
    and returns the frame with the predictions drawn.
    """
    readFrames = 0
    while True:
        hasFrame, frame = video.read()
        if not hasFrame:
            break
        # Get prediction results for this frame
        outputs = predictor(frame)
        # Make sure the frame is colored
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        # Draw a visualization of the predictions using the video visualizer
        visualization = v.draw_instance_predictions(frame, outputs["instances"].to("cpu"))
        # Convert Matplotlib RGB format to OpenCV BGR format
        visualization = cv2.cvtColor(visualization.get_image(), cv2.COLOR_RGB2BGR)
        yield visualization
        readFrames += 1
        if readFrames > maxFrames:
            break
# Create a cut-off for debugging
num_frames = 120
# Enumerate the frames of the video
for visualization in tqdm.tqdm(runOnVideo(video, num_frames), total=num_frames):
    # Write test image
    cv2.imwrite('POSE detectron2.png', visualization)
    # Write to video file
    video_writer.write(visualization)
# Release resources
video.release()
video_writer.release()
cv2.destroyAllWindows()
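To confirm the writer actually produced frames, you can reopen the output file and check its frame count, for example with this small sketch:
import cv2

# reopen the file we just wrote and report how many frames it contains
check = cv2.VideoCapture('out.mp4')
print("frames written:", int(check.get(cv2.CAP_PROP_FRAME_COUNT)))
check.release()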
I am looking to open, process, and save multiple TIFFs in Python.
I have the following code to open, process, and save one TIFF, but I am having trouble with multiple files:
import skimage.io
import skimage.color
import skimage.filters

# Read one image.TIF:
image = skimage.io.imread(fname=path)
image[2, 1] = 1.0
# Process the file (make binary)
gray_image = skimage.color.rgb2gray(image)
# Blur the image to denoise (larger sigma = more noise removed)
blurred_image = skimage.filters.gaussian(gray_image, sigma=5)
# Adding threshold, t:
t = 0.8
binary_mask = blurred_image < t
# Save the file to another location:
skimage.io.imsave(fname=path, arr=binary_mask)
Any help is appreciated!
Here's a multiprocessing approach that may help:
import skimage.io
import skimage.color
import skimage.filters
from concurrent.futures import ProcessPoolExecutor
from glob import glob
import os.path

source_dir = '<your source directory>'
target_dir = '<your target directory>'
filetype = '*.tif'

def process(path):
    image = skimage.io.imread(fname=path)
    image[2, 1] = 1.0
    gray_image = skimage.color.rgb2gray(image)
    blurred_image = skimage.filters.gaussian(gray_image, sigma=5)
    outpath = os.path.join(target_dir, os.path.basename(path))
    arr = blurred_image < 0.8
    skimage.io.imsave(fname=outpath, arr=arr)

def main():
    with ProcessPoolExecutor() as executor:
        filelist = glob(os.path.join(source_dir, filetype))
        executor.map(process, filelist)

if __name__ == '__main__':
    main()
Use glob to identify all the files matching the *.tif pattern, then use the ProcessPoolExecutor's map function to process each file in its own process. As the processing is mainly CPU-intensive, multiprocessing is likely to be the best fit for this.
Is it necessary for this to be parallelized? It's not a huge amount of processing that you are performing. If you don't need parallel processing, you can just run a for loop over your images:
import skimage.io
import skimage.color
import skimage.filters
import os
import glob

# set up an in and out directory
in_dir = r'directory\with\images'
out_dir = r'directory\for\processed\images'

# make a list of all of the raw image files
os.chdir(in_dir)
filelist = glob.glob('*.png')  # change to whatever file pattern you need here

for file_iter in filelist:
    os.chdir(in_dir)
    image = skimage.io.imread(fname=file_iter)
    image[2, 1] = 1.0
    # Process the file (make binary)
    gray_image = skimage.color.rgb2gray(image)
    # Blur the image to denoise (larger sigma = more noise removed)
    blurred_image = skimage.filters.gaussian(gray_image, sigma=5)
    # Adding threshold, t:
    t = 0.8
    binary_mask = blurred_image < t
    # Save the file to another location:
    out_filename = file_iter[:-4] + 'processed.png'  # make new filename based on old filename
    os.chdir(out_dir)
    skimage.io.imsave(fname=out_filename, arr=binary_mask)
As the title says, I'm trying to compare a cv2-generated live window to a JPEG I have already saved, to help create a fairly advanced macro I'm working on.
My solution was to repeatedly save the live stream to a file on my PC so that I only have to compare image to image, but it's proving more difficult than I thought, and I don't know exactly what is wrong (I was following only the first part of this tutorial for the creation of the live window: https://www.youtube.com/watch?v=ks4MPfMq8aQ&ab_channel=sentdex).
import numpy as np
import PIL
import keyboard
from PIL import ImageGrab
import cv2
import time
last_time = time.time()
path = r'C:\Users\srajp\OneDrive\Desktop\Pranav (temporary)\gaming.jpg'
img = cv2.imread(path)
path2 = r'C:\Users\srajp\OneDrive\Desktop\Pranav (temporary)\maging.jpg'
img2 = cv2.imread(path2)
window_name = 'hackerman'
time.sleep(3)
while True:
    screen = np.array(ImageGrab.grab(bbox=(0, 0, 1920, 1008)))
    last_time = time.time()
    cv2.imshow(window_name, cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
    result = cv2.imwrite(path2, screen)
    if result == False:
        print("Error in saving file")
    img22 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
    difference = cv2.subtract(img22, img)
    r, g, b = cv2.split(difference)
    if cv2.countNonZero(r) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(b) == 0:
        print('wNIuafiuwfaygewfia')
Thank you in advance for answering, and I apologize for any confusing or erroneous formatting, as this is my first post. I will read the replies tomorrow, if there are any, since it is late at night in my timezone as I write this.
https://i.stack.imgur.com/nCT0o.jpg
This is the image that gets saved live.
https://i.stack.imgur.com/XyNix.jpg
That's the default image.
The default image is just my desktop for now, but the idea is that I can add multiple if statements for the different scenarios that occur while using the macro.
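For reference, this is roughly the comparison I'm aiming for, sketched directly in memory without the save/reload round trip (the reference path and capture size are placeholders):
import cv2
import numpy as np
from PIL import ImageGrab

reference = cv2.imread(r'C:\Users\srajp\OneDrive\Desktop\Pranav (temporary)\gaming.jpg')

# grab one frame of the screen; ImageGrab returns RGB, OpenCV uses BGR
screen = np.array(ImageGrab.grab(bbox=(0, 0, 1920, 1008)))
screen_bgr = cv2.cvtColor(screen, cv2.COLOR_RGB2BGR)

# cv2.subtract needs matching shapes, so resize the grab to the reference
screen_bgr = cv2.resize(screen_bgr, (reference.shape[1], reference.shape[0]))

# identical images leave no non-zero pixels in the difference
difference = cv2.subtract(screen_bgr, reference)
if not np.any(difference):
    print('screen matches the reference image')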
For a current project, I am trying to set up a video recognition program leveraging TensorFlow 2 and OpenCV (macOS Catalina).
When running the script below with Python 3 from the terminal or via Jupyter, the green webcam light indicates that the camera is switched on, and no error messages appear. However, no video image/window shows up on my screen. I have tried various solutions, including adding camera screen frame data, none of which worked.
Does anyone know a smart tweak to make the camera image/window visible?
import os
import numpy as np
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import cv2
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from utils import label_map_util
from utils import visualization_utils as vis_util
# Define the video stream
cap = cv2.VideoCapture(0) # Change only if you have more than one webcams
# What model to download.
# Models can bee found here: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
# Number of classes to detect
NUM_CLASSES = 90
# Download Model
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
    file_name = os.path.basename(file.name)
    if 'frozen_inference_graph.pb' in file_name:
        tar_file.extract(file, os.getcwd())
# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Helper code
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)
# Detection
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        while True:
            # Read frame from camera
            ret, image_np = cap.read()
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Extract image tensor
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Extract detection boxes
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Extract detection scores
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            # Extract detection classes
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            # Extract number of detections
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            # Display output
            cv2.imshow('object detection', cv2.resize(image_np, (800, 600)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
Have you tried passing -1 or 1 as the device index of the VideoCapture? Just in case you haven't tried it yet.
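For example, here is a small sketch that probes a few device indices and reports which ones actually open:
import cv2

# try a few common device indices and report which ones open
for index in (-1, 0, 1, 2):
    cap = cv2.VideoCapture(index)
    if cap.isOpened():
        print("camera found at index", index)
    cap.release()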
But first of all, you should find out where things go wrong: verify that the system reads the frames properly.
You can try the following to test whether your camera is running and being read correctly:
import cv2 as cv

cap = cv.VideoCapture(0)
if not cap.isOpened():
    print("Cannot open camera")
    exit()
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    # Our operations on the frame come here
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv.imshow('frame', gray)
    if cv.waitKey(1) == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv.destroyAllWindows()
cap.read() returns a bool (True/False). If the frame is read correctly, it will be True, so you can check for the end of the video by checking this return value.
Sometimes cap may not have initialized the capture; in that case, this code shows an error. You can check whether it is initialized with the cap.isOpened() method. If it returns True, OK; otherwise open it using cap.open().
Running this will help you (and us) determine which part has gone wrong, so further solutions can be suggested.
If the test shows no error, this link may also be somewhat related; you can check it out.
Please share the result so we can investigate further.
I'm working on multiple object tracking, and I'm using the TensorFlow API to generate the detections. I have managed to modify it a bit to make it return the coordinates of the detected objects, and now I want to feed those coordinates (bounding boxes) to an object tracker (CSRT or KCF).
However, running both detection and tracking simultaneously would be too computationally expensive.
Are there any other methods to pass the coordinates or to pause the detection?
Below is the detection code.
The tracking code is at this link: https://github.com/spmallick/learnopencv/blob/master/MultiObjectTracker/multiTracker.py
import numpy as np
import os
import six.moves.urllib as urllib
import sys
sys.path.insert(0,r'C:\Users\Ahmed.DESKTOP-KJ6U1BJ\.spyder-py3\TensorFlow\models\research\object_detection')
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2
import imutils
from protos import string_int_label_map_pb2
from utils import visualization_utils2 as vis_util
def scale(bbox, width, height):
    x = int(bbox[0] * width)
    y = int(bbox[1] * height)
    w = int(bbox[2] * width)
    h = int(bbox[3] * height)
    return (x, y, w, h)
W = 800
H = 600
videopath = "file:///C:/Users/Ahmed.DESKTOP-KJ6U1BJ/.spyder-py3/soccer4.mp4"
cap = cv2.VideoCapture(videopath)
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# # Model preparation
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = r'C:\Users\Ahmed.DESKTOP-KJ6U1BJ\.spyder-py3\TensorFlow\models\research\object_detection\data\mscoco_label_map.pbtxt'
NUM_CLASSES = 90
# ## Download Model ( uncomment if the model isn't downloaded / comment if you alredy have the model)
"""
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd())
"""
# ## Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
import label_map_util
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# # Detection
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        while True:
            ret, image_np = cap.read()
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Definite input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the objects.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Use the results of the detection.
            boxes2 = np.squeeze(boxes)
            max_boxes_to_draw = boxes2.shape[0]
            scores2 = np.squeeze(scores)
            min_score_thresh = 0.7
            classes2 = np.squeeze(classes).astype(np.int32)
            for i in range(min(max_boxes_to_draw, boxes2.shape[0])):
                if boxes2 is None or scores2[i] > min_score_thresh:
                    class_name = category_index[classes2[i]]['name']
                    print("This box is gonna get used", scale(boxes2[i], W, H), class_name)
            cv2.imshow('Object Detection', cv2.resize(image_np, (800, 600)))
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                cv2.destroyAllWindows()
                cap.release()
                break
cv2.destroyAllWindows()
cap.release()
You could count frames with a simple counter in the while True loop and "pause" the detection with an if statement before sess.run, like:
frame_count = 0
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        while True:
            ret, image_np = cap.read()
            # on the first frame, and then every 10 frames, do the detection
            if frame_count == 0:
                ### detection here
                # restart counter (from -10 to 0)
                frame_count = -10
            ## do tracking here
            frame_count += 1
This way the actual detection is done for the first frame and then every 10th frame, so in the other 9 frames you can do whatever you want.
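For illustration, here is a minimal sketch of how the scaled detection boxes could be handed to OpenCV KCF trackers inside that skeleton; it assumes opencv-contrib-python is installed, and the exact factory name (cv2.TrackerKCF_create vs. cv2.legacy.TrackerKCF_create) depends on your OpenCV version:
import cv2

trackers = []

def start_trackers(frame, boxes_xywh):
    # create one KCF tracker per detected box (requires opencv-contrib-python)
    new_trackers = []
    for box in boxes_xywh:
        tracker = cv2.TrackerKCF_create()
        tracker.init(frame, tuple(box))
        new_trackers.append(tracker)
    return new_trackers

def update_trackers(frame):
    # advance every tracker by one frame and return the boxes that still track
    updated = []
    for tracker in trackers:
        ok, box = tracker.update(frame)
        if ok:
            updated.append(box)
    return updated

# inside the while loop sketched above:
#   if frame_count == 0:
#       trackers = start_trackers(image_np, scaled_boxes)  # scaled_boxes: list of (x, y, w, h)
#       frame_count = -10
#   tracked_boxes = update_trackers(image_np)
#   frame_count += 1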
I am trying to write a gender recognition program; below is the code.
import caffe
import os
import numpy as np
import sys
import cv2
import time
#Models root folder
models_path = "./models"
#Loading the mean image
mean_filename=os.path.join(models_path,'./mean.binaryproto')
proto_data = open(mean_filename, "rb").read()
a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
mean_image = caffe.io.blobproto_to_array(a)[0]
#Loading the gender network
gender_net_pretrained = os.path.join(models_path, './gender_net.caffemodel')
gender_net_model_file = os.path.join(models_path, './deploy_gender.prototxt')
gender_net = caffe.Classifier(gender_net_model_file, gender_net_pretrained)
#Reshaping mean input image
mean_image = np.transpose(mean_image,(2,1,0))
#Gender labels
gender_list=['Male','Female']
#cv2 Haar Face detector
face_cascade = cv2.CascadeClassifier(os.path.join(models_path, 'haarcascade_frontalface_default.xml'))
#Getting prediction from live camera
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if ret is True:
        start_time = time.time()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = face_cascade.detectMultiScale(frame_gray, 1.3, 5)
        #Finding the largest face
        if len(rects) >= 1:
            rect_area = [rects[i][2]*rects[i][3] for i in xrange(len(rects))]
            rect = rects[np.argmax(rect_area)]
            x, y, w, h = rect
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
            roi_color = frame[y:y+h, x:x+w]
            #Resizing the face image
            crop = cv2.resize(roi_color, (256, 256))
            #Subtraction from mean file
            #input_image = crop - mean_image
            input_image = rect
            #Getting the prediction
            start_prediction = time.time()
            prediction = gender_net.predict([input_image])
            gender = gender_list[prediction[0].argmax()]
            print("Time taken by DeepNet model: {}").format(time.time()-start_prediction)
            print prediction, gender
            cv2.putText(frame, gender, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            print("Total Time taken to process: {}").format(time.time()-start_time)
    #Showing output
    cv2.imshow("Gender Detection", frame)
    cv2.waitKey(1)

#Delete objects
cap.release()
cv2.killAllWindows()
When I run it, I get this error:
a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
AttributeError: 'module' object has no attribute 'io'
How can I solve it? I am using the cnn_gender_age_prediction model. I want to make a real-time gender recognition program using Python and the cnn_gender_age model.
io is a module in the caffe package. When you type import caffe, it does not automatically import every module in the caffe package, including io. There are two solutions.
First one: import caffe.io manually
import caffe
import caffe.io
Second one: update to the latest caffe version, in which you should find this line in __init__.py under the python/caffe directory:
from . import io
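Either way, a quick check that io is now available (this should print a module path instead of raising AttributeError):
import caffe
import caffe.io  # explicit import needed on older caffe versions

print(caffe.io.__file__)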