Count identical objects with the OpenCV library - Python

Hi, I use OpenCV in Python to count palm trees in my project. I use the code below, but it only works for simple pictures such as coins. My images are of palm trees, and the count comes out wrong: it counts 2 palm trees while the real number is about 100. How can I fix this?
Thanks.
The code:
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Read image
I = cv2.imread('drops.jpg', 0)

# Threshold
IThresh = (I >= 118).astype(np.uint8) * 255

# Remove the biggest connected component from the image
# Find the area of each connected component
connectedComponentProps = cv2.connectedComponentsWithStats(IThresh, 8, cv2.CV_32S)
IThreshOnlyInsideDrops = np.zeros_like(connectedComponentProps[1])
IThreshOnlyInsideDrops = connectedComponentProps[1]
stat = connectedComponentProps[2]
maxArea = 0
for label in range(connectedComponentProps[0]):
    cc = stat[label, :]
    if cc[cv2.CC_STAT_AREA] > maxArea:
        maxArea = cc[cv2.CC_STAT_AREA]
        maxIndex = label

# Convert the background value to the foreground value
for label in range(connectedComponentProps[0]):
    cc = stat[label, :]
    if cc[cv2.CC_STAT_AREA] == maxArea:
        IThreshOnlyInsideDrops[IThreshOnlyInsideDrops == label] = 0
    else:
        IThreshOnlyInsideDrops[IThreshOnlyInsideDrops == label] = 255

# Fill in all the IThreshOnlyInsideDrops as 0 in the original IThresh
IThreshFill = IThresh
IThreshFill[IThreshOnlyInsideDrops == 255] = 0
IThreshFill = np.logical_not(IThreshFill / 255).astype(np.uint8) * 255
plt.imshow(IThreshFill)

# Get number of drops and cover percentage
connectedComponentPropsFinal = cv2.connectedComponentsWithStats(IThreshFill, 8, cv2.CV_32S)
NumberOfDrops = connectedComponentPropsFinal[0]
CoverPresntage = float(np.count_nonzero(IThreshFill == 0) / float(IThreshFill.size))

# Print
print "Number of drops = " + str(NumberOfDrops)
print "Cover percentage = " + str(CoverPresntage)
I want to count palm trees and I have tried different pieces of code, but without results.
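A minimal sketch of one common fix (not the poster's code): when trees touch each other they merge into a single connected component, so a distance transform can be used to find one bright peak per tree before counting, as in the first half of the classic OpenCV watershed recipe. The file name, the Otsu threshold flag (THRESH_BINARY_INV may be needed if the trees are darker than the ground) and the 0.5 peak factor are placeholders that would need tuning for aerial palm images.

import cv2
import numpy as np

img = cv2.imread('palms.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

# Distance transform: bright peaks sit near the centre of each separate tree
dist = cv2.distanceTransform(binary, cv2.DIST_L2, 5)
_, sure_fg = cv2.threshold(dist, 0.5 * dist.max(), 255, 0)
sure_fg = sure_fg.astype(np.uint8)

# Every surviving peak becomes one counted object (label 0 is the background)
num_labels, markers = cv2.connectedComponents(sure_fg)
print("Estimated object count:", num_labels - 1)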

Related

I have a case where numpy.where() does not work as expected. Where is my error?

I have a binary segmentation map as output from a neural network (NIfTI format) and want to keep only the biggest island, to get rid of unwanted false positives.
I am able to achieve this with:
import nibabel as nib
import numpy as np
from scipy.ndimage import label

vol = 'PATH_TO_VOLUME'
elements_in_biggest_island = 0
biggest_index = 0

aNii = nib.load(vol)
a = aNii.get_fdata()
s = np.ones((3, 3, 3), dtype='uint8')
labelled_array, num_features = label(a, structure=s)

for i in range(1, num_features + 1):
    tempArray = labelled_array
    if np.count_nonzero(tempArray == i) > elements_in_biggest_island:
        elements_in_biggest_island = np.count_nonzero(tempArray == i)
        biggest_index = i

print("Biggest island was at index ", biggest_index, " with a total of ", elements_in_biggest_island, " members.")

labelled_array[labelled_array == biggest_index] = 1.0
labelled_array[labelled_array < biggest_index] = 0.0
labelled_array[labelled_array > biggest_index] = 0.0

ni_img = nib.Nifti1Image(labelled_array, aNii.affine)
nib.save(ni_img, f'PATH_TO_PROCESSED_VOL')
But the "thresholding" is very inefficient. In another application I work with numpy.where(), which generates a good speedup compared to the shown way of thresholding.
My approach was, to remove the array[array>I] == x lines by:
labelled_array = np.where(labelled_array==biggest_index, 1, 0)
This exact line works perfectly in another application, but here I only get a black 3D volume, which is not what I need.
Is anybody able to point out the mistake that I have made?
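For what it's worth, here is a minimal sketch (assuming 'a' is the volume loaded exactly as above) of how the biggest island is often kept with a single np.where call, using np.bincount to get every component size in one pass instead of looping:

import numpy as np
from scipy.ndimage import label

labelled_array, num_features = label(a, structure=np.ones((3, 3, 3), dtype='uint8'))

sizes = np.bincount(labelled_array.ravel())   # voxels per label, computed in one pass
sizes[0] = 0                                  # ignore the background label
biggest_index = int(sizes.argmax())

# keep only the biggest island; the result is float so it can be saved as before
biggest_island = np.where(labelled_array == biggest_index, 1.0, 0.0)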

Dlib training error: complaint about aspect ratio and area size when all conditions are actually met

I use dlib.train_simple_object_detector to create a detector for steel bars in a bunch. This is my sample:
It has the SAME box for every bar (created via Duplicate RectBox in labelImg), and the box size is 122x118 (14396 pixels in area).
This is my training code:
import dlib
import cv2.cv2 as cv2
import os
import time
import sys
from xml.dom import minidom

if len(sys.argv) != 4:
    print("Usage: python train.py /path/to/images/ /path/to/boxes/ /path/to/result.svm")
    print("Images and boxes are named like 1.jpg and 1.xml")
    exit(1)

data = {}
image_indexes = [int(img_name.split(".")[0]) for img_name in os.listdir(sys.argv[1])]
# np.random.shuffle(image_indexes)
image_indexes.sort()

# parse rectangle data
for index in image_indexes:
    if index in [0]:
        continue
    rects = minidom.parse("{}/{}.xml".format(sys.argv[2], index)).getElementsByTagName("bndbox")
    img = cv2.imread(os.path.join(sys.argv[1], str(index) + ".jpg"))
    for rect in rects:
        xmin = int(rect.getElementsByTagName("xmin")[0].firstChild.data)
        xmax = int(rect.getElementsByTagName("xmax")[0].firstChild.data)
        ymin = int(rect.getElementsByTagName("ymin")[0].firstChild.data)
        ymax = int(rect.getElementsByTagName("ymax")[0].firstChild.data)
        dlib_box = dlib.rectangle(left=xmin, top=ymin, right=xmax, bottom=ymax)
        if index in data:
            data[index][1].append(dlib_box)
        else:
            data[index] = (img, [dlib_box])

# train
percent = 0.8
split = int(len(data) * percent)
images = [tuple_value[0] for tuple_value in data.values()]
bounding_boxes = [tuple_value[1] for tuple_value in data.values()]

options = dlib.simple_object_detector_training_options()
options.add_left_right_image_flips = False
options.C = 5
options.num_threads = 16
options.epsilon = 0.01
# options.be_verbose = True

st = time.time()
detector = dlib.train_simple_object_detector(images[:split], bounding_boxes[:split], options)
print("Training complete. Time taken: {:.2f} seconds.".format(time.time() - st))
print("Training Metrics: {}".format(dlib.test_simple_object_detector(images[:split], bounding_boxes[:split], detector)))
detector.save(sys.argv[3])
When I run it with this sample it gives an error:
Error! An impossible set of object boxes was given for training. All the boxes
need to have a similar aspect ratio and also not be smaller than about 400
pixels in area.
But that's not true. The boxes definitely have the same aspect ratio, since they are identical, and their area is well above 400 pixels (about 14000, in fact). Why does this happen?
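One way to narrow this down (a diagnostic sketch, not a fix) is to print the aspect ratio and area of every box that actually ends up in the training data, using dlib.rectangle's width(), height() and area() accessors, and check whether any image received boxes that were meant for another one. Variable names follow the script above.

for index, (img, boxes) in data.items():
    for box in boxes:
        print(index, "area:", box.area(),
              "aspect:", box.width() / float(box.height()))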

Get class label in Faster-RCNN with gluoncv

I'm trying to count the number of vehicles in images using the Faster-RCNN implementation in gluoncv, as shown here. I want to get the string label of each detection. For example, in the following image, the string label would be 'bus'. How can I get it?
An image of a bus
Following is my implementation.
import os
import glob
from matplotlib import pyplot as plt
from gluoncv import model_zoo, data, utils

# net is assumed to be a pretrained Faster-RCNN loaded from the gluoncv model zoo,
# e.g. net = model_zoo.get_model('faster_rcnn_resnet50_v1b_voc', pretrained=True)
vehiclesum1 = []
for filename in glob.glob('/home/xx/PythonCode/test/*.jpg'):
    x, orig_img = data.transforms.presets.rcnn.load_test(filename)
    box_ids, scores, bboxes = net(x)
    ax = utils.viz.plot_bbox(orig_img, bboxes[0], scores[0], box_ids[0], class_names=net.classes)
    # I want to identify this label1
    vehiclesum1.append(label1.count('car') + label1.count('truck') + label1.count('motorcycle') + label1.count('bus'))
plt.show()
How about something like this?
# map class IDs to class names
id2string = {i: name for i, name in enumerate(net.classes)}
# filter on score
thresh = 0.8
top_classIDs = [c for c, s in zip(box_ids[0], scores[0]) if s > thresh]
# convert IDs to class names into "label1"
# (asscalar() turns each 1-element mxnet NDArray into a plain Python number)
label1 = [id2string[int(c.asscalar())] for c in top_classIDs]
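If that works, label1 can be plugged straight into the loop from the question, e.g. (a hypothetical usage sketch, inside the loop, once label1 has been built):

vehiclesum1.append(sum(label1.count(v) for v in ('car', 'truck', 'motorcycle', 'bus')))

It is worth printing net.classes first, since the class names depend on the pretrained dataset: COCO uses 'motorcycle', VOC uses 'motorbike', and VOC has no 'truck' class at all.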

I would like to know how to calculate the percentage of a color in an image

I would like to know how to calculate the percentage of a color in an image. The image below represents 100%:
and this one shows the bar when the level decreases:
I want to learn how to correctly get the percentage that the bar shows at the moment. I tried to use the Matplotlib library, but I could not get the expected result. Could anyone help me, please? I do not need a ready-made solution, just someone to teach me...
I think you want to calculate the progress by looking at the image.
I'm not sure if there's a library for this specific thing, but here's my simple approach to it:
you can compare the images to find up to which column they are similar and then calculate the % of the task done. Let me demonstrate...
!wget https://i.stack.imgur.com/jnxX3.png

import numpy as np
from matplotlib import pyplot as plt

a = plt.imread('./jnxX3.png')
plt.imshow(a)

This loads the image with 100% completion into variable a.
c = a
c = c[:, 0:c.shape[1] - 50]
aa = np.zeros(dtype=float, shape=(11, 50, 3))
c = np.append(c, aa, axis=1)
plt.imshow(c)

This makes a sample incomplete image, which you should have provided.
def status(complete_img, part_image):
    """inputs must be numpy arrays"""
    complete_img = complete_img[:, 1:]  # the first pixel column doesn't belong to % completion
    part_image = part_image[:, 1:]
    counter = 0
    while counter < part_image.shape[1] and counter < complete_img.shape[1]:
        if (complete_img[:, counter] == part_image[:, counter]).all():
            counter += 1
        else:
            break
    perc = 100 * (float(counter) / complete_img.shape[1])
    return perc

status(a, c)  # this returns the % of columns that are similar in the two images
A proposition:
import numpy as np
from PIL import Image
from urllib.request import urlopen

full = np.asarray(Image.open(urlopen("https://i.stack.imgur.com/jnxX3.png")))
probe = np.asarray(Image.open(urlopen("https://i.stack.imgur.com/vx5zt.png")))

# crop the images to the same shape
# (this step should be avoided, best compare equal shaped arrays)
full = full[:, 1:probe.shape[1] + 1, :]

def get_percentage(full, probe, threshold):
    def profile_red(im):
        pr = im[:, :, 0] - im[:, :, 1]
        return pr[pr.shape[0] // 2]

    def zero(arr):
        z = np.argwhere(np.abs(np.diff(np.sign(arr))).astype(bool))
        if len(z):
            return z[0, 0]
        else:
            return len(arr)

    full_red = profile_red(full)
    probe_red = profile_red(probe)
    mask = full_red > threshold
    diff = full_red[mask] - probe_red[mask]
    x0 = zero(diff - threshold)
    percentage = x0 / diff.size * 100
    err = 2. / diff.size * 100
    return percentage, err

print("{:.1f} p\m {:.1f} %".format(*get_percentage(full, probe, 75.0)))
Result:
94.6 p\m 2.2 %
You're looking for the Pillow library. There are two ways to measure color: Hue, Saturation, Luminance (HSL) and Red, Green, Blue (RGB). There are functions to do both in the library.
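As a rough illustration of that idea (the file name and the RGB range for the bar's colour are placeholders, not taken from the question): count how many pixels fall inside the bar's colour range and report that as a percentage of the image.

from PIL import Image

im = Image.open("bar.png").convert("RGB")
pixels = list(im.getdata())
filled = sum(1 for r, g, b in pixels if r > 150 and g < 100 and b < 100)
print("filled: {:.1f} %".format(100.0 * filled / len(pixels)))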

Use diagonal fill to eliminate 8-connectivity of the background in Python (similar to bwmorph diag in MATLAB)

I'm looking for a way to connect 8-connected pixels in Python, similar to MATLAB's bwmorph 'diag' function:
BW = bwmorph(BW, 'diag')
For example,
0 1 0        0 1 0
1 0 0   ->   1 1 0
0 0 0        0 0 0
Thanks in advance!
Misha
That works, thanks! Here's the python code:
import numpy as np
from scipy import ndimage

def bwmorphDiag(bw):
    # filter for 8-connectivity of the background
    f = np.array(([1, -1, 0], [-1, 1, 0], [0, 0, 0]), dtype=int)
    # initialize result with original image
    bw = bw.astype(int)
    res2 = bw.copy().astype(bool)
    for ii in range(4):  # all orientations
        # add results where the sum equals 2 -> two background pixels on the
        # diagonal with 2 foreground pixels on the crossing mini-anti-diagonal
        res2 = res2 | (ndimage.filters.convolve(np.invert(bw), f) == 2)
        f = np.rot90(f)  # rotate filter to next orientation
    return res2
You can achieve the same result using simple image filtering. I did it in MATLAB, but it should be straightforward to do in Python as well:
% random binary image
bw = rand(50) > 0.5;
% result using bwmorph(bw,'diag')
res1 = bwmorph(bw,'diag');
% filter for 8-connectivity of the background
f = [1 -1 0;-1 1 0;0 0 0];
% initialize result with original image
res2 = bw;
for ii = 1:4 % all orientations
    % add results where the sum equals 2 -> two background pixels on the
    % diagonal with 2 foreground pixels on the crossing mini-anti-diagonal
    res2 = res2 | ( imfilter(double(~bw),f) == 2 );
    f = rot90(f); % rotate filter to next orientation
end
isequal(res2,res1) % yes
I was actually looking for the same Python equivalent of MATLAB's bwmorph('diag'), but since I couldn't find it I eventually decided to code it myself. Please check the MATLAB help for bwmorph and the 'diag' option for further info about what it does.
import numpy as np
import scipy.ndimage.morphology as smorph
import skimage.morphology as skm

class bwmorph:
    @staticmethod
    def diag(imIn):
        strl = np.array([
            [[0,1,0],[1,0,0],[0,0,0]],
            [[0,1,0],[0,0,1],[0,0,0]],
            [[0,0,0],[1,0,0],[0,1,0]],
            [[0,0,0],[0,0,1],[0,1,0]],
            [[0,1,0],[1,0,0],[0,1,0]],
            [[0,1,0],[1,0,1],[0,0,0]],
            [[0,1,0],[0,0,1],[0,1,0]],
            [[0,0,0],[1,0,1],[0,1,0]]
        ], dtype=np.uint8)
        bwIm = np.zeros(imIn.shape, dtype=int)
        imIn = np.array(imIn)
        imIn = imIn / np.max(np.max(imIn))  # normalizing so it can be added back later
        for i in range(8):  # apply all 8 structuring elements
            bwIm = bwIm + smorph.binary_hit_or_miss(imIn, strl[i, :, :])
        bwIm = ((bwIm > 0) + imIn) > 0
        return bwIm  # output is boolean
I used the 'hit or miss' transform, with the structuring elements 'strl' defined at the beginning. I guess that's a classic way to do it.
Note the @staticmethod decorator if you're running it on an older version of Python.
A usage example would be bwmorph().diag(BinaryImage)
All the best ;)
