ModuleNotFoundError: No module named 'samples.coco' - python

Could someone help me with the error raised by the file test_model.py in a Mask R-CNN project?
I'm hoping someone with experience in instance segmentation can help with this error, which occurred while running on Google Colab
(setup: TensorFlow 1.13.1 and Keras 2.1.6):
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import cv2
import time
from mrcnn.config import Config
from datetime import datetime

# Root directory of the project
ROOT_DIR = os.getcwd()

# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize

# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, "samples/coco"))  # To find local version
# import coco
from samples.coco import coco
# from pycocotools.coco import COCO

# Directory to save logs and the trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(MODEL_DIR, "mask_rcnn_shapes_0080.h5")

# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)
print("cuiwei***********************")

# Directory of images to run detection on
# (os.path.join discards ROOT_DIR here because the second argument is absolute)
IMAGE_DIR = os.path.join(ROOT_DIR, "/content/gdrive/My Drive/Fish-characteristic-measurement/Complete_code/images")

class ShapesConfig(Config):
    """Configuration for training on the toy shapes dataset.
    Derives from the base Config class and overrides values specific
    to the toy shapes dataset.
    """
    # Give the configuration a recognizable name
    NAME = "shapes"

    # Train on 1 GPU and 1 image per GPU. Batch size is 1 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

    # Number of classes (including background)
    NUM_CLASSES = 1 + 80  # background + 80 classes

    # Use small images for faster training. Set the limits of the small side
    # and the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 704
    IMAGE_MAX_DIM = 1024

    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (8 * 6, 16 * 6, 32 * 6, 64 * 6, 128 * 6)  # anchor side in pixels

    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 400

    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100

    # Use small validation steps since the epoch is small
    VALIDATION_STEPS = 50

# import train_tongue
# class InferenceConfig(coco.CocoConfig):
class InferenceConfig(ShapesConfig):
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

config = InferenceConfig()

# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)

# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)

# Class names; the index of a class in the list is its ID.
# For example, to get the ID of the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'sfish_eye']
# 'fish_knife', 'fish_eye', 'fish_pupil', 'fish_body', 'sfish_knife', 'sfish_eye', 'sfish_pupil', 'sfish_body'

# Run detection on every image in the images folder
count = os.listdir(IMAGE_DIR)
for i in range(0, len(count)):
    path = os.path.join(IMAGE_DIR, count[i])
    if os.path.isfile(path):
        image = skimage.io.imread(path)
        # Run detection
        results = model.detect([image], verbose=1)
        r = results[0]
        visualize.display_instances(count[i], image, r['rois'], r['masks'], r['class_ids'],
                                    class_names, r['scores'])
ERROR PRESENTED BY THE ABOVE CODE:
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-14-815378d5aae5> in <module>
29 sys.path.append(os.path.join(ROOT_DIR, "samples/coco")) # To find local version
30 # import coco
---> 31 from samples.coco import coco
32 # from pycocotools.coco import COCO
33
ModuleNotFoundError: No module named 'samples.coco'
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
---------------------------------------------------------------------------
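A likely cause, for context: ROOT_DIR is set to os.getcwd(), so the samples package is only importable when the notebook's working directory is the root of the Mask_RCNN repository. A minimal sketch of one common fix, assuming the Matterport Mask_RCNN repo has been cloned in the Colab session (the clone path below is a hypothetical example; adjust it to wherever the repo actually lives):

import sys

MASK_RCNN_DIR = "/content/Mask_RCNN"  # hypothetical clone location
sys.path.append(MASK_RCNN_DIR)  # makes `samples` importable as a (namespace) package
sys.path.append(MASK_RCNN_DIR + "/samples/coco")

from samples.coco import coco  # package-style import

Since samples/coco is itself on sys.path, a plain `import coco` should also work as a fallback.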

Related

Displaying image from a pytorch model

Having read a paper about demoireing images, I want to see how effective the method is. Given that the whole dataset is 100 GB, I only used 1 GB worth of data to train a new model. In the code below, I'm trying to display the image produced by the model. However, the image color is either messed up or grayscale, and bears no visual resemblance to its source, a moire-infected image. So I want to know whether the small training dataset caused the model to behave this way, or whether I'm not displaying the image properly.
Example images: source, from_model, other.
The code I tried for displaying it:
import numpy as np
import os
import math
import torch
from tqdm import tqdm
from utils import MoirePic
from torch.utils.data import DataLoader
from torchvision.io import read_image
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt

def psnr(img1, img2):
    mse = np.mean((img1 - img2) ** 2)
    if mse == 0:
        return 100
    return 10 * math.log10(1 / mse)

def Test():
    device = "cpu"
    root = './Train_Data2'
    dataset = MoirePic(os.path.join(root, 'source'),
                       os.path.join(root, 'target'))
    test_loader = DataLoader(dataset=dataset, batch_size=1, drop_last=False)
    model = torch.load('./moire_best.pth', map_location=torch.device('cpu'))
    model.eval()
    loop = tqdm(enumerate(test_loader), total=len(test_loader), leave=False)
    psnr_all = 0
    for idx, (data, target) in loop:
        with torch.no_grad():
            output = model(data).cpu()
        transform = transforms.ToPILImage()
        img = transform(output[0])
        img.show()
        print(psnr(output[0].numpy(), target[0].numpy()))

Test()
The PSNR I got between the two images is 19.55170616098589.
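One thing worth checking, as a sketch rather than a confirmed fix: ToPILImage renders float tensors assuming values in [0, 1], so raw network output that strays outside that range shows up as corrupted colors. Assuming the model is meant to emit RGB in [0, 1], clamping before display is a cheap test (show_output is a hypothetical helper, not part of the original code):

import torch
from torchvision import transforms

def show_output(output_tensor):
    # output_tensor: a (C, H, W) float tensor straight from the model
    img_tensor = output_tensor.clamp(0.0, 1.0)  # keep values in ToPILImage's expected range
    img = transforms.ToPILImage()(img_tensor)
    img.show()

If the training pipeline normalized the targets differently (e.g. mean/std normalization), the inverse transform would need to be applied instead of a plain clamp.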
My trained model - https://drive.google.com/file/d/1xuCX7A48MvJU4V3BkvwFLjgccOE2_eBi/view?usp=sharing
The link to the paper : https://paperswithcode.com/paper/moire-photo-restoration-using-multiresolution
The link to the implementation: https://github.com/ZhengJun-AI/MoirePhotoRestoration-MCNN

How to prune an existing tensorflow/keras model trained on imagenet

I am trying to prune InceptionNetV3 from Keras, trained on ImageNet. Right now I am using a tensorflow-datasets subset of ImageNet for the pruning. Currently my pruned models do not work and return garbage data when tested on the same dataset they were pruned with. How do I prune without losing all accuracy? Here is my code:
Imports:
import logging
import tempfile
from pathlib import Path
import tensorflow as tf
from tensorflow import keras
import numpy as np
import tensorflow_datasets as tfds
from cv2 import cv2 # Pylint now views cv2 as a library
import tensorflow_model_optimization as tfmot
All of these imports are up to date; I'm currently using Python 3.10.1.
Here is the code I am using to prune the model.
v2_path = 'C:\\temp\\imagenet_v2'
inception_image_size = (299, 299)
image_count = 5
batch_size = 512
epochs = 4
dataset = tfds.load(name='imagenet_v2', split='test', data_dir=v2_path)
numpy_dataset = tfds.as_numpy(dataset)
layer_count = 313
count = [1]

def main():
    v2_full_path = 'C:\\temp\\imagenet_v2\\downloads\\extracted\\TAR_GZ.s3-us-west-2_image_image-match-frequ8MN_35JZFrGeoTI82aIgjNtpWbosMu7yp_w5ODXJynw.tar.gz\\imagenetv2-matched-frequency-format-val'
    dataset_train = tf.keras.utils.image_dataset_from_directory(directory=v2_full_path,
                                                                image_size=inception_image_size,
                                                                label_mode='categorical')
    inception_model = tf.keras.applications.InceptionV3(weights='imagenet',
                                                        pooling='avg',
                                                        input_shape=(299, 299, 3))

    def apply_pruning_to_dense(layer):
        count[0] += 1  # Python throws a fit if I use a normal variable, but doesn't mind layer_count
        if layer_count - count[0] < 5:
            return tfmot.sparsity.keras.prune_low_magnitude(layer)
        return layer

    model_for_pruning = tf.keras.models.clone_model(
        inception_model,
        clone_function=apply_pruning_to_dense,
    )
    inception_model = tf.keras.applications.InceptionV3(weights="imagenet")
    logdir = tempfile.mkdtemp()
    callbacks = [
        tfmot.sparsity.keras.UpdatePruningStep(),
        tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
    ]
    model_for_pruning.compile(loss='categorical_crossentropy',
                              optimizer=keras.optimizers.SGD(learning_rate=1e-3),
                              metrics=['accuracy'])
    model_for_pruning.fit(dataset_train,
                          batch_size=batch_size,
                          epochs=epochs,
                          callbacks=callbacks,
                          use_multiprocessing=True)
    save_test_model(inception_model, ".tflite")
    save_test_model(model_for_pruning, "_prune.tflite")
When I run the model through model_for_pruning.fit(...), the accuracy is only around 1%-2%, though it used to be around 0.16% per epoch. I fixed that by adding label_mode='categorical' when obtaining the dataset, which leads me to believe the issue is somehow with either my dataset or how I use it.
The resulting pruned TensorFlow Lite model has a 0% accuracy rating when tested against the imagenet_v2 subset, while the unpruned one gets around a 40% accuracy rating.
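One detail worth noting, as an assumption rather than a diagnosis: a model that still carries its PruneLowMagnitude wrappers tends to convert badly to TFLite, so the wrappers are normally stripped before export. Since the body of save_test_model isn't shown above, this sketch may duplicate what it already does:

import tensorflow as tf
import tensorflow_model_optimization as tfmot

# Remove the pruning wrappers, leaving plain layers with sparse weights
model_for_export = tfmot.sparsity.keras.strip_pruning(model_for_pruning)

converter = tf.lite.TFLiteConverter.from_keras_model(model_for_export)
tflite_bytes = converter.convert()
with open("inception_prune.tflite", "wb") as f:
    f.write(tflite_bytes)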

How to create a function that returns outputs from tensorflow 1.x model given input?

It might look like a silly question, but I have only worked with tf 2.x and eager execution, so I have no idea about the functionality of tf 1.x. The code given below works fine once, but when I run it a second time it throws an error. That makes some sense, because tf 1.x uses static graphs, and re-running the code redefines nodes in the same default graph.
Suppose we have code like:

def predict(my_input):
    # process the input, access the global `tf1.x` model and return the result
    return result
QUESTION:
Where exactly am I supposed to put this predict function, and how should I create it so that I can use it with a serving package like Flask / FastAPI?
This code is extracted from the test_model.py file of the DPED paper
! git clone https://github.com/aiff22/DPED
%cd ./DPED
CODE
import imageio
from PIL import Image
import numpy as np
import tensorflow as tf
from models import resnet
import utils
import os
import sys
import matplotlib.pyplot as plt

# process arguments
phone, dped_dir, test_subset, iteration, resolution, use_gpu = ('iphone_orig', 'dped/', 'full', 'all', 'orig', 'true')

tf.compat.v1.disable_v2_behavior()  # disable tf 2.x workings

# get all available image resolutions
res_sizes = utils.get_resolutions()

# get the specified image resolution
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_SIZE = utils.get_specified_res(res_sizes, phone, resolution)

# disable gpu if specified
config = tf.compat.v1.ConfigProto(device_count={'GPU': 0}) if use_gpu == "false" else None

# create placeholders for input images
x_ = tf.compat.v1.placeholder(tf.float32, [None, IMAGE_SIZE])
x_image = tf.reshape(x_, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 3])

# generate enhanced image
enhanced = resnet(x_image)

with tf.compat.v1.Session(config=config) as sess:
    # load pre-trained model
    saver = tf.compat.v1.train.Saver()
    saver.restore(sess, "models_orig/" + phone)

    # this is the part which reads the image path and returns the result as enhanced_image
    image = np.float16(np.array(Image.fromarray(imageio.imread(path))
                                .resize([res_sizes[phone][1], res_sizes[phone][0]]))) / 255
    image_crop = utils.extract_crop(image, resolution, phone, res_sizes)
    image_crop_2d = np.reshape(image_crop, [1, IMAGE_SIZE])

    # get enhanced image
    enhanced_2d = sess.run(enhanced, feed_dict={x_: image_crop_2d})
    enhanced_image = np.reshape(enhanced_2d, [IMAGE_HEIGHT, IMAGE_WIDTH, 3])
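One way to restructure this for serving, sketched under the assumption that the DPED code above is importable: build the graph and restore the checkpoint once at module import time, keep the Session open for the app's lifetime, and have predict() only run the forward pass. Re-running the original cell fails because it redefines the same placeholders in the default graph; a long-lived module avoids that (or call tf.compat.v1.reset_default_graph() before rebuilding).

import imageio
import numpy as np
import tensorflow as tf
from PIL import Image
import utils
from models import resnet

tf.compat.v1.disable_v2_behavior()

phone, resolution = 'iphone_orig', 'orig'
res_sizes = utils.get_resolutions()
IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_SIZE = utils.get_specified_res(res_sizes, phone, resolution)

# Build the graph once
x_ = tf.compat.v1.placeholder(tf.float32, [None, IMAGE_SIZE])
x_image = tf.reshape(x_, [-1, IMAGE_HEIGHT, IMAGE_WIDTH, 3])
enhanced = resnet(x_image)

# Open the session once and keep it alive for the process lifetime
sess = tf.compat.v1.Session()
tf.compat.v1.train.Saver().restore(sess, "models_orig/" + phone)

def predict(path):
    # Same preprocessing as the original script, wrapped per request
    image = np.float16(np.array(Image.fromarray(imageio.imread(path))
                                .resize([res_sizes[phone][1], res_sizes[phone][0]]))) / 255
    image_crop = utils.extract_crop(image, resolution, phone, res_sizes)
    image_crop_2d = np.reshape(image_crop, [1, IMAGE_SIZE])
    enhanced_2d = sess.run(enhanced, feed_dict={x_: image_crop_2d})
    return np.reshape(enhanced_2d, [IMAGE_HEIGHT, IMAGE_WIDTH, 3])

A Flask or FastAPI handler can then import this module and call predict(path) per request without touching the graph again.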

Tensorflow Lite, Image size is zero error

Actually, my question is very simple. I would like to use my own data in a TensorFlow Lite model, so I wrote these lines of code:
root_path = r"C:\Users\90531\Desktop\dataset\b"
image_path = os.path.join(os.path.dirname(root_path), '1602854451425')
data = DataLoader.from_folder(image_path)
Also, this is the error that I encountered:
File "C:\Users\90531\AppData\Roaming\Python\Python39\site-packages\tensorflow_examples\lite\model_maker\core\data_util\image_dataloader.py", line 73, in from_folder
raise ValueError('Image size is zero')
ValueError: Image size is zero
This happens when the DataLoader cannot infer the labels of your images. The images should be divided into subfolders according to the class they belong to:
from tflite_model_maker.image_classifier import DataLoader
import seedir as sd

image_path = '/content/images'
sd.seedir(image_path, style='spaces', indent=4, anystart='- ')
data = DataLoader.from_folder(image_path)

- images/
    - class1/
        - result_image.png
    - class2/
        - result_image1.png

INFO:tensorflow:Load image with size: 2, num_label: 2, labels: class1, class2.
In Google Colab, use the following code:

import tensorflow as tf

data_path = tf.keras.utils.get_file(
    'flower_photos',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
    untar=True)

from tflite_model_maker import image_classifier
from tflite_model_maker.image_classifier import DataLoader

# Load input data specific to an on-device ML app.
data = DataLoader.from_folder(data_path)
train_data, test_data = data.split(0.9)

# Customize the TensorFlow model.
model = image_classifier.create(train_data)

# Evaluate the model.
loss, accuracy = model.evaluate(test_data)

# Export to TensorFlow Lite model and label file in `export_dir`.
model.export(export_dir='/tmp/')

MMDetection loading from own training checkpoint for inference produces garbage detections

I've trained up a very simple model using the MMDetection Colab tutorial, and verifying the result using:
img = mmcv.imread('/content/mmdetection/20210301_145246_123456.jpg')
img = cv2.resize(img, (0,0), fx=0.25, fy=0.25)
model.cfg = cfg
result = inference_detector(model, img)
show_result_pyplot(model, img, result)
confirms that it's working great.
I then follow the same steps as for training, but instead I load my own training checkpoint and I don't train. Running the verification snippet above then produces garbage results.
Here's that in code
from mmcv import Config
cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py')
from mmdet.apis import set_random_seed
# Modify dataset type and path
cfg.dataset_type = 'SamplesDataset'
cfg.data_root = 'samples_dataset/'
cfg.data.test.type = 'SamplesDataset'
cfg.data.test.data_root = 'samples_dataset/'
cfg.data.test.ann_file = 'train.txt'
cfg.data.test.img_prefix = 'o2h'
cfg.data.train.type = 'SamplesDataset'
cfg.data.train.data_root = 'samples_dataset/'
cfg.data.train.ann_file = 'train.txt'
cfg.data.train.img_prefix = 'o2h'
cfg.data.val.type = 'SamplesDataset'
cfg.data.val.data_root = 'samples_dataset/'
cfg.data.val.ann_file = 'val.txt'
cfg.data.val.img_prefix = 'o2h'
# modify num classes of the model in box head
cfg.model.roi_head.bbox_head.num_classes = 1
# We can still use the pre-trained Mask RCNN model though we do not need to
# use the mask branch
# cfg.load_from = 'checkpoints/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth'
cfg.load_from = './experiments/epoch_1.pth'
# Set up working dir to save files and logs.
cfg.work_dir = './experiments'
# The original learning rate (LR) is set for 8-GPU training.
# We divide it by 8 since we only use one GPU.
cfg.optimizer.lr = 0.02 / 8
cfg.lr_config.warmup = None
cfg.log_config.interval = 10
cfg.runner = dict(type='EpochBasedRunner', max_epochs=1)
cfg.total_epochs = 1
# Change the evaluation metric since we use customized dataset.
cfg.evaluation.metric = 'mAP'
# We can set the evaluation interval to reduce the evaluation times
# cfg.evaluation.interval = 12
# We can set the checkpoint saving interval to reduce the storage cost
cfg.checkpoint_config.interval = 1
# Set the seed so the results are more reproducible
cfg.seed = 0
set_random_seed(0, deterministic=False)
cfg.gpu_ids = range(1)
# We can initialize the logger for training and have a look
# at the final config used for training
# print(f'Config:\n{cfg.pretty_text}')
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.apis import train_detector
# Build dataset
# datasets = [build_dataset(cfg.data.train)]
# Build the detector
model = build_detector(cfg.model)
# Add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
# Create work_dir
# mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# train_detector(model, datasets, cfg, distributed=False, validate=True)
Obviously, I wouldn't normally do all that just to validate my model, but this is one of many debugging steps for me, as my goal is to download and run the model locally. This is what I'm trying to do locally:
import sys
import glob
import time
sys.path.insert(0, '../mmdetection')
from mmdet.apis import init_detector, inference_detector, show_result_pyplot
from mmdet.models import build_detector
import mmcv
import numpy as np
file_paths = glob.glob('samples/o2h/*.jpg')
cfg = mmcv.Config.fromfile('../mmdetection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py')
cfg.model.roi_head.bbox_head.num_classes = 1
cfg.load_from = 'models/mmdet_faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.pth' # my own checkpoint
model = build_detector(cfg.model)
model.CLASSES = ('hash',)
model.cfg = cfg
file_path = np.random.choice(file_paths)
print(file_path)
start = time.time()
result = inference_detector(model, file_path)
print(f"Time taken for inference: {time.time() - start:.2f}s")
show_result_pyplot(model, file_path, result)
One of the mistakes in your code is that you have not updated num_classes for mask_head.
Our aim should be to use the same config for testing/validation that was used for training. If you trained the model with num_classes set to 1 for bbox_head and mask_head in the config file, but for validation/testing you use the default of 80 num_classes, that mismatch in the testing process leads to garbage detections and segmentations.
There are 2 solutions for achieving the required result:
1. Change the num_classes in the config file before doing inference
2. Save the model and config file as pickle as soon as training is completed
Note: The first solution is standard, but the second solution is simpler.
1. Change the num_classes in the config file before doing inference.
First, find the total number of classes in your dataset; num_classes is the total number of classes in the training dataset.
Navigate to this path:
mmdetection/configs/model_name (model_name is the name used for training)
Inside the model_name folder, find the ..._config.py that you used for training.
Inside this config file, if you find model = dict(...), change the num_classes for each of these keys: bbox_head, mask_head.
bbox_head might be a list; if so, change num_classes for each entry in the list.
If model = dict(...) is not found, then the first line contains _base_ = '...'. Open that config file and check whether model = dict(...) is present; if not, keep following the chain of _base_ files.
After changing the num_classes, use this code for inference:
from mmdet.apis import init_detector, inference_detector
import mmcv
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
%matplotlib inline
config_file = './configs/scnet/scnet_x101_64x4d_fpn_20e_coco.py' #(I have used SCNet for training)
checkpoint_file = 'tutorial_exps/epoch_40.pth' #(checkpoint saved after training)
model = init_detector(config_file, checkpoint_file, device='cuda:0') #loading the model
img = 'test.png'
result = inference_detector(model, img)
#visualize the results in a new window
im1 = cv2.imread(img)[:,:,::-1]
#im_ones = np.ones(im1.shape, dtype='uint')*255
# model.show_result(im_ones, result, out_file='fine_result6.jpg')
plt.imshow(model.show_result(im1, result))
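As a side note, the same num_classes override can also be done programmatically instead of editing the config file on disk, which is what the question itself does. A minimal sketch (the config path is only an example; bbox_head is a single dict in Faster R-CNN but a list of dicts in cascade-style models such as SCNet):

from mmcv import Config

cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py')

# bbox_head may be one head or a list of heads
heads = cfg.model.roi_head.bbox_head
if isinstance(heads, list):
    for head in heads:
        head.num_classes = 1
else:
    heads.num_classes = 1

# models with a mask branch also need mask_head updated
if cfg.model.roi_head.get('mask_head'):
    cfg.model.roi_head.mask_head.num_classes = 1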
2. Save the model and config as pickle as soon as training is completed.
Another simple solution is to save both the model and the config as pickle files as soon as training is completed, instead of relying on mmdetection to do it.
Note: The pickle files should be saved right after training is completed.
Code for saving as pickle:
import pickle

with open('mdl.pkl', 'wb') as f:
    pickle.dump(model, f)
with open('cfg.pkl', 'wb') as f:
    pickle.dump(cfg, f)
You can use this model/config wherever and whenever you want. For inference with the saved model, use this:
import pickle, mmcv
from mmdet.apis import inference_detector, show_result_pyplot
model = pickle.load(open('mdl.pkl','rb'))
cfg = pickle.load(open('cfg.pkl','rb'))
img = mmcv.imread('images/test.png')
model.cfg = cfg
result = inference_detector(model, img)
show_result_pyplot(model, img, result)
