I have code that I learned from this link (https://www.youtube.com/watch?v=nnH55-zD38I&t=1047s), but I got an error that says:
line 62, in <module>
cv2.putText(imgOriginal,classNames[id], (50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),1)
IndexError: list index out of range
[ WARN:0] global C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-m8us58q4\opencv\modules\videoio\src\cap_msmf.cpp
(438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
Here is the code:
import cv2
import numpy as np
import os

path = '#2 Image descriptor\Assets\Query'
orb = cv2.ORB_create(nfeatures=1000)

# IMPORT IMAGES
images = []
classNames = []
myList = os.listdir(path)
print('Number of Classes = ', len(myList))
#print(myList)
for cl in myList:
    imgCur = cv2.imread(f'{path}/{cl}', 0)
    images.append(imgCur)
    classNames.append(os.path.splitext(cl)[0])
print(classNames)

def findDes(images):
    desList = []
    for img in images:
        kp, des = orb.detectAndCompute(img, None)
        desList.append(des)
    return desList

def findID(img, desList, thres=15):
    kp2, des2 = orb.detectAndCompute(img, None)
    bf = cv2.BFMatcher()
    matchList = []
    finalVal = -1
    try:
        for des in desList:
            matches = bf.knnMatch(des, des2, k=2)
            good = []
            for m, n in matches:
                if m.distance < 0.7 * n.distance:
                    good.append([m])
            matchList.append(len(good))
    except:
        pass
    #print(matchList)
    if len(matchList) != 0:
        if max(matchList) > thres:
            finalVal = matchList.index(max(matchList))
    return finalVal

desList = findDes(images)
print(len(desList))

cap = cv2.VideoCapture(0)
#cap = cv2.imread('#2 Image descriptor\Assets\Train\RE8.jpg')
while True:
    success, img2 = cap.read()
    imgOriginal = img2.copy()
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    id = findID(img2, desList)
    if id != -1:
        cv2.putText(imgOriginal, classNames[id], (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 1)
    cv2.imshow('img2', imgOriginal)
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()  # make sure the camera is released when the loop ends
cv2.destroyAllWindows()
I think the error is at:
imgCur = cv2.imread(f'{path}/{cl}', 0)
The problem is at this part of your code:
if id != -1:
    cv2.putText(imgOriginal, classNames[id], (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 1)
The error is telling you that you are indexing the classNames list with an id that doesn't exist in the list. For example, if the classNames list is empty, classNames[0] will raise this error. You can debug it like so:
if id != -1:
    if id < len(classNames):
        cv2.putText(imgOriginal, classNames[id], (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 1)
    else:
        print(classNames)
        print(f"Error: Attempted to index {id} from list of length {len(classNames)}.")
When I run it with fewer images, e.g. 50-100, it works fine, but when I run it with a larger number of images (200-700) it throws an error. This is my code:
from unittest import result
import cv2
import os
from glob import glob
import pickle
import numpy as np
import matplotlib as plt
import string

def get_train_and_test_img_features():
    train_path = "/Users/Antonie/Project/Skripsi/myWork/Code/Source/DB/query/*/*.png"
    #train_path = "/Users/Antonie/Project/Skripsi/myWork/Skripsi_Code/SIFTDocumentRetrieval/dataset3/test/*.png"
    test_path = "/Users/Antonie/Project/Skripsi/myWork/Code/Source/DB/target/*/*.png"

def get_sift_features(_in_path, _debug_view=False):
    img = cv2.imread(_in_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    sift = cv2.xfeatures2d.SIFT_create()
    kp, desc = sift.detectAndCompute(gray, None)
    img = cv2.drawKeypoints(gray, kp, img)
    #cv2.imshow('sift_keypoints', img)
    if _debug_view:
        img = cv2.drawKeypoints(gray, kp, img)
        cv2.imshow('sift_keypoints', img)
        cv2.waitKey(0)
    return kp, desc

def compare_features_flann(_kp1, _dsc1, _kp2, _dsc2, _thres=0):
    FLANN_INDEX_KDTREE = 3
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(_dsc1, _dsc2, k=2)
    matches_mask = [[0, 0] for i in range(len(matches))]
    good_points = []
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.6 * n.distance:
            #matches_mask[i] = [1, 0]
            good_points.append(m)
    number_keypoints = 0
    if len(_kp1) <= len(_kp2):
        number_keypoints = len(_kp1)
    else:
        number_keypoints = len(_kp2)
    return good_points, len(good_points) / number_keypoints * 100

def compare_features_bf(_kp1, _dsc1, _kp2, _dsc2, _thres=0):
    bf = cv2.BFMatcher()
    matches = bf.knnMatch(_dsc1, _dsc2, k=2)
    good_points = []
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.75 * n.distance:
            #matches_mask[i] = [1, 0]
            good_points.append(m)
    number_keypoints = 0
    if len(_kp1) <= len(_kp2):
        number_keypoints = len(_kp1)
    else:
        number_keypoints = len(_kp2)
    # print("KP 1: " + str(len(_kp1)))
    # print("Kp 2: " + str(len(_kp2)))
    # print("Match:", len(good_points))
    # print("Matches: ", len(good_points) / number_keypoints * 100)
    return good_points, len(good_points) / number_keypoints * 100

def create_query_database(_path):
    img_db = {}
    for file in glob(_path):
        kp, desc = get_sift_features(file)
        img_db[os.path.basename(file)] = {"keypoint": kp,
                                          "descriptors": desc}
    # # Database
    # with open('queries.txt', 'wb') as file:
    #     file.write(pickle.dumps(img_db))
    return img_db

def get_best_matches(_result_dict):
    mean = np.mean([val for key, val in _result_dict.items()])
    positive = {}
    negative = {}
    for key, val in _result_dict.items():
        res = (val - mean)
        if res > mean:
            positive[key] = val
        else:
            negative[key] = val
    return positive

if __name__ == "__main__":
    sift = cv2.xfeatures2d.SIFT_create()
    # location
    target_path = "/Users/Antonie/Project/Skripsi/myWork/Code/Source/DB/target/*.*"
    #query_path = "/Users/Antonie/Project/logo-det/Dataset/logo/img_logo/*/*.png"
    #query_path = "/Users/Antonie/Project/Skripsi/myWork/Code/Source/DB/query/*/*.png"
    query_path = "/Users/Antonie/Project/Skripsi/myWork/Code/dataset_img4/Process2/Process2/*.png"
    query_db = create_query_database(query_path)
    for files in glob(target_path, recursive=True):
        results = {}
        kb1, des1 = get_sift_features(files)
        print(os.path.basename(files), "\n")
        for keys, values in query_db.items():
            kb2 = values["keypoint"]
            des2 = values["descriptors"]
            good, percentage = compare_features_flann(kb1, des1, kb2, des2)
            results[keys] = percentage
        #final_result = get_best_matches(results)
        final_result = sorted(results.items(), key=lambda x: x[1], reverse=True)[:4]
        sortdict = dict(final_result)
        #print(sortdict)
        out_list = []
        for i in sortdict.keys():
            out_list.append(i)
        nama_file = out_list
        print("this is the retrieved list: ", out_list)
        #print("this is just one: ", nama_file)
        for i in range(0, len(nama_file)):
            print(nama_file[i])
        # document_path = "/Users/Antonie/Project/Skripsi/myWork/Code/dataset_img4/firstPage/"
        # for root, dir, files in os.walk(query_path):
        #     if nama_file in files:
        #         print(os.path.join(root, nama_file))
        #         #img_path = os.path.join(document_path, str(a))
        #         img_path = document_path + str(out_list[1])
        #         #print(a)
        #         #bimg = cv2.imread(img_path)
        #         #b = cv2.resize(bimg, (450,600))
        #         #cv2.imshow("result", b)
        #         #cv2.waitKey(0)
        #words = nama_file[0].split('_')
        #print(words)
        for j in nama_file:
            wordss = j.split('_')
            #wordss.append(j)
            print(str(wordss[0]) + '.pdf')
        # next task: split filename: PemkabBuleleng_1 (for logo)
        #print(final_result)
        #plt.figure()
        #plt.imshow(final_result[1])
        #plt.show()
        print("-----------------")
Traceback (most recent call last):
  File "/Users/Antonie/Project/Skripsi/myWork/Code/siftFindDescriptor.py", line 127, in <module>
    good, percentage = compare_features_flann(kb1, des1, kb2, des2)
  File "/Users/Antonie/Project/Skripsi/myWork/Code/siftFindDescriptor.py", line 40, in compare_features_flann
    matches = flann.knnMatch(_dsc1, _dsc2, k=2)
cv2.error: OpenCV(3.4.2) /Users/travis/build/skvark/opencv-python/opencv/modules/flann/src/miniflann.cpp:315: error: (-210:Unsupported format or combination of formats) in function 'buildIndex'
type=0
How can I fix it?
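One hedged guess as to the cause: FLANN's buildIndex often raises "(-210:Unsupported format or combination of formats) ... type=0" when a descriptor array is None, empty, or not float32, and the chance of a low-texture image producing such an array grows with the image count. A minimal guard sketch, assuming the variable names from the code above:

import numpy as np

def flann_safe(desc):
    # FLANN's KD-tree index needs a non-empty float32 array; an image
    # with too few keypoints can yield None or an empty descriptor set,
    # and knnMatch with k=2 needs at least two rows.
    if desc is None or len(desc) < 2:
        return None
    return np.float32(desc)

# Hypothetical use inside the matching loop:
# des1, des2 = flann_safe(des1), flann_safe(des2)
# if des1 is None or des2 is None:
#     continue  # skip pairs that cannot be matched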
My code is supposed to create an H5 file for each and every video in the folder, extracting the features from the video and storing them in the H5 file.
In the code shown below, features are extracted from multiple videos and all of them are stored in a single H5 file.
H5 file layout:
video1:
    - feature
video2:
    - feature
Issue:
How do I create a separate H5 file for every video after its processing is done?
Code: Create_data.py
import argparse
from utils.generate_dataset import Generate_Dataset

parser = argparse.ArgumentParser("Welcome you to fraction")
# Dataset options
parser.add_argument('--input', '--split', type=str, help="input video")
parser.add_argument('--output', type=str, default='', help="out data")
args = parser.parse_args()

if __name__ == "__main__":
    gen = Generate_Dataset(args.input, args.output)
    gen.generate_dataset()
    gen.h5_file.close()
Code: Generate_Dataset.py:
import os
from networks.CNN import ResNet
from utils.KTS.cpd_auto import cpd_auto
from tqdm import tqdm
import math
import cv2
import numpy as np
import h5py

class Generate_Dataset:
    def __init__(self, video_path, save_path):
        self.resnet = ResNet()
        self.dataset = {}
        self.video_list = []
        self.video_path = ''
        self.h5_file = h5py.File(save_path, 'w')
        self._set_video_list(video_path)

    def _set_video_list(self, video_path):
        # import pdb; pdb.set_trace()
        if os.path.isdir(video_path):
            self.video_path = video_path
            fileExt = (".mp4", ".avi")
            self.video_list = [_ for _ in os.listdir(video_path) if _.endswith(fileExt)]
            self.video_list.sort()
        else:
            self.video_path = ''
            self.video_list.append(video_path)
        for idx, file_name in enumerate(self.video_list):
            self.dataset['video_{}'.format(idx+1)] = {}
            self.h5_file.create_group('video_{}'.format(idx+1))

    def _extract_feature(self, frame):
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = cv2.resize(frame, (224, 224))
        res_pool5 = self.resnet(frame)
        frame_feat = res_pool5.cpu().data.numpy().flatten()
        return frame_feat

    def _get_change_points(self, video_feat, n_frame, fps):
        n = n_frame / fps
        m = int(math.ceil(n/2.0))
        K = np.dot(video_feat, video_feat.T)
        change_points, _ = cpd_auto(K, m, 1)
        change_points = np.concatenate(([0], change_points, [n_frame-1]))
        temp_change_points = []
        for idx in range(len(change_points)-1):
            segment = [change_points[idx], change_points[idx+1]-1]
            if idx == len(change_points)-2:
                segment = [change_points[idx], change_points[idx+1]]
            temp_change_points.append(segment)
        change_points = np.array(list(temp_change_points))
        # temp_n_frame_per_seg = []
        # for change_points_idx in range(len(change_points)):
        #     n_frame = change_points[change_points_idx][1] - change_points[change_points_idx][0]
        #     temp_n_frame_per_seg.append(n_frame)
        # n_frame_per_seg = np.array(list(temp_n_frame_per_seg))
        # print(change_points)
        arr = change_points
        list1 = arr.tolist()
        list2 = list1[-1].pop(1)  # pop the last value
        print(list2)
        print(list1)
        print("****************")  # find the [-1][-1] value and divide it by 15
        cps_m = math.floor(arr[-1][1]/15)
        list1[-1].append(cps_m)  # append to the list
        print(list1)
        print("****************")  # convert the list back to an ndarray
        arr = np.asarray(list1)
        print(arr)
        arrmul = arr * 15
        print(arrmul)
        print("****************")
        # print(type(change_points))
        # print(n_frame_per_seg)
        # print(type(n_frame_per_seg))
        median_frame = []
        for x in arrmul:
            print(x)
            med = np.mean(x)
            print(med)
            int_array = med.astype(int)
            median_frame.append(int_array)
        print(median_frame)
        # print(type(int_array))
        return arrmul

    # TODO: save dataset
    def _save_dataset(self):
        pass

    def generate_dataset(self):
        print('[INFO] CNN processing')
        for video_idx, video_filename in enumerate(self.video_list):
            video_path = video_filename
            if os.path.isdir(self.video_path):
                video_path = os.path.join(self.video_path, video_filename)
            video_basename = os.path.basename(video_path).split('.')[0]
            video_capture = cv2.VideoCapture(video_path)
            fps = video_capture.get(cv2.CAP_PROP_FPS)
            n_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
            frame_list = []
            picks = []
            video_feat = None
            video_feat_for_train = None
            for frame_idx in tqdm(range(n_frames-1)):
                success, frame = video_capture.read()
                if frame_idx % 15 == 0:
                    if success:
                        frame_feat = self._extract_feature(frame)
                        picks.append(frame_idx)
                        if video_feat_for_train is None:
                            video_feat_for_train = frame_feat
                        else:
                            video_feat_for_train = np.vstack((video_feat_for_train, frame_feat))
                        if video_feat is None:
                            video_feat = frame_feat
                        else:
                            video_feat = np.vstack((video_feat, frame_feat))
                    else:
                        break
            video_capture.release()
            arrmul = self._get_change_points(video_feat, n_frames, fps)
            self.h5_file['video_{}'.format(video_idx+1)]['features'] = list(video_feat_for_train)
            self.h5_file['video_{}'.format(video_idx+1)]['picks'] = np.array(list(picks))
            self.h5_file['video_{}'.format(video_idx+1)]['n_frames'] = n_frames
            self.h5_file['video_{}'.format(video_idx+1)]['fps'] = fps
            self.h5_file['video_{}'.format(video_idx+1)]['video_name'] = video_filename.split('.')[0]
            self.h5_file['video_{}'.format(video_idx+1)]['change_points'] = arrmul
Expected result:
Folder: video
    video_1:
        video1.mp4
        video2.mp4
The files are in this structure; now read the video files and create separate H5 files after the process is over.
For more code reference
You need to:
remove self.h5_file = h5py.File(save_path, 'w') from __init__()
remove self.h5_file.create_group('video_{}'.format(idx+1)) from _set_video_list()
remove gen.h5_file.close() from main()
change the last block of generate_dataset() into something like:
...
video_capture.release()
arrmul = self._get_change_points(video_feat, n_frames, fps)
h5_dir = os.path.dirname(video_path)
h5_full_path = os.path.join(h5_dir, 'video_{}'.format(video_idx+1))
with h5py.File(h5_full_path, 'w') as h5_file:
    h5_file['features'] = list(video_feat_for_train)
    h5_file['picks'] = np.array(list(picks))
    h5_file['n_frames'] = n_frames
    h5_file['fps'] = fps
    h5_file['video_name'] = video_filename.split('.')[0]
    h5_file['change_points'] = arrmul
Please note that your inner video file indices and the actual video file name numbers may not match, so I suggest changing
h5_dir = os.path.dirname(video_path)
h5_full_path = os.path.join(h5_dir, 'video_{}'.format(video_idx+1))
from above into
h5_full_path = video_path.split('.')[0] + '.h5'
This will create a features file whose name matches the video file.
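Putting both changes together, the tail of generate_dataset() might then look like this (a sketch reusing the names from the question):

video_capture.release()
arrmul = self._get_change_points(video_feat, n_frames, fps)
# one H5 file per video, named after the video itself
# (e.g. video1.mp4 -> video1.h5)
h5_full_path = video_path.split('.')[0] + '.h5'
with h5py.File(h5_full_path, 'w') as h5_file:
    h5_file['features'] = list(video_feat_for_train)
    h5_file['picks'] = np.array(list(picks))
    h5_file['n_frames'] = n_frames
    h5_file['fps'] = fps
    h5_file['video_name'] = video_filename.split('.')[0]
    h5_file['change_points'] = arrmul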
align = openface.AlignDlib(dlibFacePredictor)
subject = input("Enter Subject:")
a = ['BI', 'SIC', 'PGIS', 'SQA', 'ITSM']
path = 'C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/images/'
if subject in a:
    print("Success")
    wbook = load_workbook(filename="C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/Attendance/" + subject + ".xlsx")
    sheet = wbook.get_sheet_by_name('TYBSCIT' + subject)
else:
    print("Invalid")
def getDateColumns():
    for i in range(1, len(sheet.rows[0]) + 1):
        cols = get_column_letter(i)
        if sheet.cell('%s%s' % (col, '1')).value == currentDate:
            return cols

def getProfileId(Ids):
    connect = sqlite3.connect("C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/sqlite3/Studentdb.db")
    cmd = "SELECT * FROM Students WHERE ID=" + str(Ids)
    cursor = connect.execute(cmd)
    profile = None
    for row in cursor:
        profile = row
    connect.close()
    return profile
attend = [0 for i in range(60)]
rec = cv2.face.LBPHFaceRecognizer_create()  # Local Binary Patterns Histograms
rec.read('C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/Training/trainingData.yml')  # loading the trained data
picNumber = 2
image = cv2.imread('C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/images/' + subject + currentDate + '.jpg')
font = cv2.FONT_HERSHEY_SIMPLEX  # the font of the text drawn on recognized faces
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # converting the camera input into grayscale
dets = detector(image, 1)
totalConf = 0.0
faceRec = 0
for i, d in enumerate(dets):
    image2 = image[d.top():d.bottom(), d.left():d.right()]
    rgbImg = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
    bb = align.getLargestFaceBoundingBox(rgbImg)
    alignedFace = align.align(96, rgbImg, bb=None, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    alignedFace = cv2.cvtColor(alignedFace, cv2.COLOR_BGR2GRAY)  # converting to grayscale
    Ids, conf = rec.predict(alignFace)  # comparing against the trained data
Traceback (most recent call last): File "C:\Users\ACER\Desktop\PROJECT ALL RESOURCE\Implementation\PYTHON FILES\facerecognition.py", line 60, in <module> Ids, conf = rec.predict(alignFace) # Comparing from the trained data NameError: name 'alignFace' is not defined
As the traceback states, alignFace is not defined. It is a typo of your previously defined alignedFace; use that instead.
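That is, line 60 should read:

Ids, conf = rec.predict(alignedFace)  # comparing against the trained data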
This is simple code used to identify playing cards through a cam. I have been trying to run it, but I keep getting this error when attempting to use any of the methods:
TypeError: src is not a numpy array, neither a scalar
import sys
import numpy as np
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages/")
import cv2

###############################################################################
# Utility code
###############################################################################

def rectify(h):
    h = h.reshape((4,2))
    hnew = np.zeros((4,2), dtype=np.float32)
    add = h.sum(1)
    hnew[0] = h[np.argmin(add)]
    hnew[2] = h[np.argmax(add)]
    diff = np.diff(h, axis=1)
    hnew[1] = h[np.argmin(diff)]
    hnew[3] = h[np.argmax(diff)]
    return hnew

###############################################################################
# Image Matching
###############################################################################

def preprocess(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5,5), 2)
    thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 1)
    return thresh

def imgdiff(img1, img2):
    img1 = cv2.GaussianBlur(img1, (5,5), 5)
    img2 = cv2.GaussianBlur(img2, (5,5), 5)
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (5,5), 5)
    flag, diff = cv2.threshold(diff, 200, 255, cv2.THRESH_BINARY)
    return np.sum(diff)

def find_closest_card(training, img):
    features = preprocess(img)
    return sorted(training.values(), key=lambda x: imgdiff(x[1], features))[0][0]

###############################################################################
# Card Extraction
###############################################################################

def getCards(im, numcards=4):
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (1,1), 1000)
    flag, thresh = cv2.threshold(blur, 120, 255, cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:numcards]
    for card in contours:
        peri = cv2.arcLength(card, True)
        approx = rectify(cv2.approxPolyDP(card, 0.02*peri, True))
        # box = np.int0(approx)
        # cv2.drawContours(im, [box], 0, (255,255,0), 6)
        # imx = cv2.resize(im, (1000,600))
        # cv2.imshow('a', imx)
        h = np.array([[0,0], [449,0], [449,449], [0,449]], np.float32)
        transform = cv2.getPerspectiveTransform(approx, h)
        warp = cv2.warpPerspective(im, transform, (450,450))
        yield warp

def get_training(training_labels_filename, training_image_filename, num_training_cards, avoid_cards=None):
    training = {}
    labels = {}
    for line in file(training_labels_filename):
        key, num, suit = line.strip().split()
        labels[int(key)] = (num, suit)
    print "Training"
    im = cv2.imread(training_image_filename)
    for i, c in enumerate(getCards(im, num_training_cards)):
        if avoid_cards is None or (labels[i][0] not in avoid_cards[0] and labels[i][1] not in avoid_cards[1]):
            training[i] = (labels[i], preprocess(c))
    print "Done training"
    return training

if __name__ == '__main__':
    if len(sys.argv) == 6:
        filename = sys.argv[1]
        num_cards = int(sys.argv[2])
        training_image_filename = sys.argv[3]
        training_labels_filename = sys.argv[4]
        num_training_cards = int(sys.argv[5])
        training = get_training(training_labels_filename, training_image_filename, num_training_cards)
        im = cv2.imread("test")
        width = im.shape[0]
        height = im.shape[1]
        if width < height:
            im = cv2.transpose(im)
            im = cv2.flip(im, 1)
        # Debug: uncomment to see registered images
        #for i, c in enumerate(getCards(im, num_cards)):
        #    card = find_closest_card(training, c)
        #    cv2.imshow(str(card), c)
        #    cv2.waitKey(0)
        cards = [find_closest_card(training, c) for c in getCards(im, num_cards)]
        print cards
    else:
        print __doc__
Here is the entire error message:
Traceback (most recent call last):
File "<pyshell#15>", line 1, in <module>
preprocess("test.JPG")
File "C:\Users\Don Ellison\Desktop\Class Programs\CSC 490 Project\Cards\Python\Playing-Card-Recognition-master\card_img.py", line 43, in preprocess
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
TypeError: src is not a numpy array, neither a scalar
You are passing the wrong argument to the preprocess function; I guess you are calling it from a Python shell.
You are not supposed to pass in an image file name, but rather a numpy array. It seems you called the preprocess function and passed in "test.JPG" in your Python shell. If you want to test your preprocess function, do:
test_img = cv2.imread("test.JPG")
preprocess(test_img)
I want to remove the black points from an image with Python, because I need to apply OCR processing to the image file.
I converted the image to monochrome, so I get this image:
http://s23.postimg.org/bulq1dmt3/ba210.png
So, I want to delete the black points:
from PIL import Image  # needed for Image.open below

def temizleHips2(x, y, w, h, listei):
    koordinat = list(nfind(x, y))
    x = int(x)
    y = int(y)
    w = int(w)
    h = int(h)
    i = 0
    a = 0
    m = 4
    b = 0
    for i in xrange(8):
        b = 0
        k = koordinat[i]
        x2, y2 = koordinatparse(k)
        if x2 >= 0 and y2 >= 0 and x2 < w and y2 < h:
            if listei[x2, y2] == 0:
                a = a + 1
    if a > 2:
        return 0
    else:
        return 255

def ultratemizle(dosya):
    # 290a.tif
    image_file = dosya
    img = Image.open(image_file)
    # img = img.convert('1')
    # img.save("209i.tif", "TIFF")
    datas = list(img.getdata())
    newData = list()
    temizlemes = list()
    temizlemeson = list()
    siyah = 0  # black pixel count
    beyaz = 0  # white pixel count
    for each in datas:
        if each == 255:
            beyaz = beyaz + 1
        else:
            siyah = siyah + 1
    if siyah > beyaz:
        # more black than white: invert the image
        for each in datas:
            if each == 255:
                each = 0
            elif each == 0:
                each = 255
            newData.append(each)
        img.putdata(newData)
    x1, y1 = 0, 0
    tmp_isim = "a" + dosya
    img.save("b" + tmp_isim, "TIFF")
    img = Image.open(tmp_isim)
    imgmat = img.load()
    x, y = img.size
    x1 = 0
    y1 = 0
    deger = 0
    temizlemes = []
    for x1 in range(0, x):
        for y1 in range(0, y):
            if imgmat[x1, y1] == 0:
                deger = temizleHips(x1, y1, x, y, imgmat)
                temizlemes.append(deger)
                if deger != imgmat[x1, y1]:
                    print "points: " + str(x1) + "," + str(y1) + " first: " + str(imgmat[x1, y1]) + " last: " + str(deger)
            else:
                temizlemes.append(imgmat[x1, y1])
    img.putdata(temizlemes)
    img.show()
    img.save(tmp_isim, "TIFF")
    tem = img.load()
I get this image:
http://s16.postimg.org/wc97bdzdt/a356.png
However, I still want to clean up the stray black pixels around the "s" shapes in the second picture, and I can't find where the problem is.
You should try to find the big contours and remove the other, smaller contours.
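A minimal sketch of that idea, assuming OpenCV 4.x, a black-on-white monochrome input like the linked ba210.png, and a speck-area threshold that would need tuning for the actual image:

import cv2

# Load the binarized page (filename assumed from the question's link).
img = cv2.imread('ba210.png', cv2.IMREAD_GRAYSCALE)
# Invert so the black marks become white blobs that findContours can see.
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    if cv2.contourArea(c) < 20:  # hypothetical speck-size threshold
        cv2.drawContours(binary, [c], -1, 0, -1)  # erase the small blob
cleaned = cv2.bitwise_not(binary)  # back to black-on-white for OCR
cv2.imwrite('cleaned.png', cleaned)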