raise KeyError(key) from err
KeyError: 'pixels'
Hi, I am a beginner and wanted to convert a row to an image, and this error appeared. What does it mean, and what is the solution?
# import these libraries
import numpy as np
import pandas as pd
import cv2
import utils
import os

# data
data = pd.read_csv('test.csv') # Path of the .csv file
#print(data.shape) # to check the shape
#print(data.head(5)) # Use this to print the first 5 lines of the data, to understand it better

def convert2image(row):
    pixels = row['pixels'] # in our dataset, the column heading was 'pixels'
    img = np.array(pixels.split())
    img = img.reshape(48, 48) # dimensions of the image
    image = np.zeros((48, 48, 3)) # empty matrix
    image[:, :, 0] = img
    image[:, :, 1] = img
    image[:, :, 2] = img
    return image.astype(np.uint8) # return the image

count = 0
for i in range(1, 6): # data.shape[0]):
    face = data.iloc[i] # take one row from the data
    img = convert2image(face) # send this row of data to the function convert2image
    count = count + 1 # counter to save the images with different names
    cv2.imwrite(r'C:/Users/sanch/Desktop/Data/Python_projects/Emotion_reco/test/' + str(count) + '.jpg', img) # path where you want to save the image
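A KeyError: 'pixels' means the DataFrame has no column with exactly that name; a common cause is stray whitespace or different capitalization in the CSV header. As a first check (a minimal diagnostic sketch, assuming the same test.csv), you could print the real column names and strip whitespace from them before indexing:

import pandas as pd

data = pd.read_csv('test.csv')
print(data.columns.tolist())           # show the exact column names in the file

# strip stray whitespace from headers, e.g. ' pixels' -> 'pixels'
data.columns = data.columns.str.strip()

if 'pixels' not in data.columns:
    print("No 'pixels' column - check the CSV header spelling and case")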
Related
This is a sentiment analysis project and I am getting this error:
Nudity-Detection-Model.h5
Traceback (most recent call last):
  File "c:\Users\kvidushi\Desktop\Mini_project\script\vapp.py", line 214, in <module>
    main()
  File "c:\Users\kvidushi\Desktop\Mini_project\script\vapp.py", line 208, in main
    model = load_model('Nudity-Detection-Model.h5')
  File "c:\Users\kvidushi\Desktop\Mini_project\script\vapp.py", line 59, in load_model
    raise ValueError("saved_model_path must be the valid directory of a saved model to load.")
ValueError: saved_model_path must be the valid directory of a saved model to load.
My script file is:
import json
import cv2
import os
import time
from os import listdir
from os.path import isfile, join, exists, isdir, abspath
from keras.models import load_model
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
import matplotlib.pyplot as plt
IMAGE_DIM = 224 # required/default image dimensionality
def load_images(image_paths, image_size, verbose=True):
    # Function for loading images into numpy arrays for passing to model.predict
    # inputs:
    #     image_paths: list of image paths to load
    #     image_size: size into which images should be resized
    #     verbose: show all of the image paths and sizes loaded
    # outputs:
    #     loaded_images: loaded images on which the keras model can run predictions
    #     loaded_image_paths: paths of images which the function is able to process
    loaded_images = []
    loaded_image_paths = []
    if isdir(image_paths):
        parent = abspath(image_paths)
        image_paths = [join(parent, f) for f in listdir(image_paths) if isfile(join(parent, f))]
    elif isfile(image_paths):
        image_paths = [image_paths]
    for img_path in image_paths:
        try:
            if verbose:
                print(img_path, "size:", image_size)
            image = keras.preprocessing.image.load_img(img_path, target_size=image_size)
            image = keras.preprocessing.image.img_to_array(image)
            # print(image.dtype)
            # print(image.shape)
            # print(image)
            image /= 255
            loaded_images.append(image)
            loaded_image_paths.append(img_path)
        except Exception as ex:
            print("Image Load Failure: ", img_path, ex)
    return np.asarray(loaded_images), loaded_image_paths
def load_model(model_path):
    print(model_path)
    if model_path is None or not exists(model_path):
        raise ValueError("saved_model_path must be the valid directory of a saved model to load.")
    model = tf.keras.models.load_model(model_path)
    # model = tf.keras.models.load_model(model_path, custom_objects={'KerasLayer': hub.KerasLayer})
    # model.summary()
    print(model.summary())
    return model
def classify(model, input_paths, image_dim=IMAGE_DIM):
    """ Classify given a model, input paths (could be single string), and image dimensionality.... """
    images, image_paths = load_images(input_paths, (image_dim, image_dim))
    probs = classify_nd(model, images)
    # print(type(probs))
    return probs
def classify_nd(model, nd_images):
    """ Classify given a model, image array (numpy).... """
    model_preds = model.predict(nd_images)
    # preds = np.argsort(model_preds, axis=1).tolist()
    categories = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']
    probs = []
    single_probs = {}
    cnt = 0
    for i, single_preds in enumerate(model_preds):
        cnt = cnt + 1
        for j, pred in enumerate(single_preds):
            if categories[j] in single_probs.keys():
                single_probs[categories[j]] = single_probs[categories[j]] + float(pred)
            else:
                single_probs[categories[j]] = float(pred)
        print(cnt)
    for i in single_probs.keys():
        # print(single_probs[i])
        single_probs[i] = single_probs[i] / cnt
    probs.append(single_probs)
    return probs
def predict(model, img_paths):
    # for img in img_paths:
    image_preds = classify(model, img_paths, IMAGE_DIM)
    data = image_preds[0]
    category = list(data.keys())
    values = list(data.values())
    fig = plt.figure(figsize=(10, 5))
    # creating the bar plot
    plt.bar(category, values, color='maroon', width=0.4)
    plt.xlabel("Categories")
    plt.ylabel("values")
    plt.title("Nudity Detection Model")
    print(json.dumps(image_preds, indent=2), '\n')
    plt.show()
def get_frames(inputFile, outputFolder, step, count):
    '''
    Input:
        inputFile - name of the input file with directory
        outputFolder - name and path of the folder to save the results
        step - time lapse between each step (in seconds)
        count - number of screenshots
    Output:
        'count' number of screenshots that are 'step' seconds apart, created from video 'inputFile' and stored in folder 'outputFolder'
    Function Call:
        get_frames("test.mp4", 'data', 10, 10)
    '''
    # initializing local variables
    step = step
    frames_count = count
    currentframe = 0
    frames_captured = 0
    # creating a folder
    try:
        # creating a folder named data
        if not os.path.exists(outputFolder):
            os.makedirs(outputFolder)
    # if not created then raise error
    except OSError:
        print('Error! Could not create a directory')
    # reading the video from the specified path
    cam = cv2.VideoCapture(inputFile)
    # reading the number of frames at that particular second
    frame_per_second = cam.get(cv2.CAP_PROP_FPS)
    print(frame_per_second)
    while (True):
        ret, frame = cam.read()
        if ret:
            if currentframe > (step * frame_per_second):
                currentframe = 0
                # saving the frames (screenshots)
                name = './data/frame' + str(frames_captured) + '.jpg'
                print('Creating...' + name)
                cv2.imwrite(name, frame)
                frames_captured += 1
                # breaking the loop when count achieved
                if frames_captured > frames_count - 1:
                    ret = False
            currentframe += 1
        if ret == False:
            break
    # Releasing all space and windows once done
    cam.release()
    cv2.destroyAllWindows()
def main():
    # img_paths = []
    # img_paths.append("1.jpg")
    # img_paths.append("2.jpg")
    # img_paths.append("3.jpg")
    # img_paths.append("4.jpg")
    # img_paths.append("5.jpg")
    # img_paths.append("6.jpg")
    # img_paths.append("7.jpg")
    # img_paths.append("8.jpg")
    # img_paths.append("9.jpg")
    # img_paths.append("10.jpg")
    # img_paths.append("11.jpg")
    # img_paths.append("12.jpg")
    # img_paths.append("13.jpg")
    # img_paths.append("14.jpg")
    # img_paths.append("15.jpg")
    get_frames("1.mp4", "data", 5, 20)
    model = load_model('Nudity-Detection-Model.h5')
    predict(model, "data")

if __name__ == "__main__":
    main()
It is asking for this file: Nudity_detection_model.h5
I have put this file in the same folder, but it is still not able to recognize it. I tried adding double quotes and single quotes and importing load_model, but the error is still the same.
Can anyone help me?
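One thing that may be worth checking (a hedged suggestion, not a confirmed fix): exists('Nudity-Detection-Model.h5') is resolved against the current working directory, not against the folder that vapp.py lives in, so running the script from another directory makes the check fail even though the .h5 file sits next to the script. A minimal sketch of building the path from the script's own location:

import os

# assumption: the .h5 file sits in the same folder as vapp.py
script_dir = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(script_dir, 'Nudity-Detection-Model.h5')
print(model_path, os.path.exists(model_path))  # verify the file is found before loading
model = load_model(model_path)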
I am taking a screenshot, and then I need to reference the shot I just took so I can translate what's inside it.
When I directly pass a file location, e.g. "filex.png", to readtext, it works, but I need it to take the file I have just written instead.
import easyocr
import pyautogui
import time
import cv2
import numpy as np
reader = easyocr.Reader(['en'])
tag = 1
for i in range(2):
    time.sleep(4)
    image = pyautogui.screenshot(region=(630, 400, 650, 130))
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    tag += 1
    img = cv2.imwrite(f"image{tag}.png", image)
    results = reader.readtext(img)
    text = ""
    for result in results:
        text += result[1] + " "
    print(text)
In answer to your specific question, I think you're looking for something like:
import easyocr
import pyautogui
import time
import cv2
import numpy as np
reader = easyocr.Reader(['en'])
tag = 1
for i in range(2):
    time.sleep(4)
    image = pyautogui.screenshot(region=(630, 400, 650, 130))
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    tag += 1
    f_name = f"image{tag}.png"
    cv2.imwrite(f_name, image)
    results = reader.readtext(f_name)
    text = ""
    for result in results:
        text += result[1] + " "
    print(text)
You can just store your file name in a variable and pass it to both imwrite and readtext.
There are other options as well, depending on what information you need access to within the program, and how quickly you need to process your data.
Option: Pass the np.array directly to readtext
image = pyautogui.screenshot(region=(630, 400, 650, 130))
image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
results = reader.readtext(image)
Option: Pass the data from the written file to the readtext function.
f_name = f"image{tag}.png"
cv2.imwrite(f_name, image)
with open(f_name, 'rb') as f:
    results = reader.readtext(f.read())
import pyautogui
import easyocr
import numpy as np
reader = easyocr.Reader(['en'],gpu=False)
im = pyautogui.screenshot(region=(630, 400, 650, 130))
result = reader.readtext(np.array(im),detail = 0)
Just pass the pyautogui image to readtext as an np.array.
I am trying to read and display DICOM (.dcm) images using the code below:
import glob
import pydicom as dicom
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
from PIL.ImageQt import ImageQt

def display_dicom_images(self, folder_Path):
    try:
        # Image parameters
        image_width = 382
        image_height = 382
        image_depth = 3
        self.total_images_in_folder = len(glob.glob1(folder_Path, "*"))
        # Select the center image for display
        self.current_image_number = round(self.total_images_in_folder / 2)
        self.display_number = self.current_image_number
        image_dtype = np.uint8
        pixel_array = np.ndarray([self.total_images_in_folder, image_height, image_width, image_depth]).astype(image_dtype)
        # load images here, once better MR images are acquired
        for image_index in range(0, self.total_images_in_folder):
            # for DICOM
            image_path = folder_Path + "/" + str(image_index) + ".dcm"
            scan_image = dicom.dcmread(image_path)
            scan_image = scan_image.pixel_array.astype(image_dtype)
            pixel_array[image_index, :scan_image.shape[0], :scan_image.shape[1], :scan_image.shape[2]] = scan_image
        return pixel_array
But I am getting this error:
IndexError('tuple index out of range',)
I am using the Pillow Python library for images.
How do you know scan_image.shape is of length 3? MR images should only be monochrome, which would make image_depth = 1 and the length of scan_image.shape equal to 2.
C.8.3.1.1.3 Photometric Interpretation
Enumerated Values:
MONOCHROME1
MONOCHROME2
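To make that concrete, here is a small sketch (my own illustration, not the asker's code) of guarding the copy inside the question's loop, where image_path, pixel_array and image_index are as in the question: a 2-D monochrome pixel array is replicated into three channels before the assignment, so scan_image.shape always has length 3.

import numpy as np
import pydicom as dicom

scan_image = dicom.dcmread(image_path).pixel_array.astype(np.uint8)
if scan_image.ndim == 2:
    # MONOCHROME1/MONOCHROME2 data is (rows, cols); stack it into 3 identical channels
    scan_image = np.stack([scan_image] * 3, axis=-1)
pixel_array[image_index, :scan_image.shape[0], :scan_image.shape[1], :scan_image.shape[2]] = scan_image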
I have written a script to find the image size and aspect ratio of all images in a directory, along with their corresponding file paths. I want to print the dict values to a CSV file with the following headers: width, height, aspect-ratio and filepath.
import os
import json
from PIL import Image

folder_images = "/home/user/Desktop/images"
size_images = dict()

def yocd(a, b):
    if (b == 0):
        return a
    else:
        return yocd(b, a % b)

for dirpath, _, filenames in os.walk(folder_images):
    for path_image in filenames:
        if path_image.endswith(".png") or path_image.endswith('.jpg') or path_image.endswith('.JPG') or path_image.endswith('.jpeg'):
            image = os.path.abspath(os.path.join(dirpath, path_image))
            """ ImageFile.LOAD_TRUNCATED_IMAGES = True """
            try:
                with Image.open(image) as img:
                    img.LOAD_TRUNCATED_IMAGES = True
                    img.verify()
                    print('Valid image')
            except Exception:
                print('Invalid image')
                img = False
            if img is not False:
                width, height = img.size
                divisor = yocd(width, height)
                w = str(int(width / divisor))
                h = str(int(height / divisor))
                aspectratio = w + ':' + h
                size_images[image] = {'width': width, 'height': height, 'aspect-ratio': aspectratio, 'filepath': image}

for k, v in size_images.items():
    print(k, '-->', v)

with open('/home/user/Documents/imagesize.txt', 'w') as file:
    file.write(json.dumps(size_images))
You can add a (properly constructed) dict directly to a pandas.DataFrame. Then, DataFrames have a .to_csv() function.
Here are the docs:
Pandas: Create a DataFrame
Pandas: Write to CSV
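For instance, a minimal sketch assuming the size_images dict from the question (one entry per image with width, height, aspect-ratio and filepath keys; the output path is just an example):

import pandas as pd

# size_images maps each file path to its width/height/aspect-ratio/filepath dict
df = pd.DataFrame.from_dict(size_images, orient='index')

# keep the requested columns in the requested order and write the CSV
df = df[['width', 'height', 'aspect-ratio', 'filepath']]
df.to_csv('/home/user/Documents/imagesize.csv', index=False)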
Without dependencies (but you may have to tweak the formatting)
csv_sep = ';'  # choose which field separator you want here
with open('your_csv', 'w') as f:
    # header
    f.write("width" + csv_sep + "height" + csv_sep + "aspect-ratio" + csv_sep + "filepath\n")
    # data
    for img in size_images.values():
        fields = [str(img['width']), str(img['height']), img['aspect-ratio'], img['filepath']]
        f.write(csv_sep.join(fields) + '\n')
I have a CSV file reader which reads a CSV file with one column and obtains data about the image, such as its label. After much debugging I have found that all the data about the image is read, but I'm missing the actual "get image" part, so currently cv2 shows the input as a black box.
I'm not sure what to do to achieve this and need some assistance.
My CSV reader is below:
def __init__(self, csvPath, imageHeight, imageWidth, transform = None):
    """
    Arguments:
        A CSV File Path
        Path to Image Folder
        Extension of Images
        PIL Transforms
    """
    self.dataFromCSV = pD.read_csv(csvPath)
    self.dataLabels = nP.asarray(self.dataFromCSV.iloc[:, 0])
    self.imageHeight = imageHeight
    self.imageWidth = imageWidth
    self.transform = transform

def __getitem__(self, index):
    singleImageLabel = self.dataLabels[index]
    imagePath = singleImageLabel.split(";")[0]
    print('Path: ' + str(imagePath))
    originalImage = cv2.imread(imagePath)
    cv2.imshow('IMAGE', originalImage)
    # Create an Empty Numpy Array to Fill
    imageAsNumpy = nP.ones((32, 32), dtype = 'uint8')
    # Fill the Numpy Array with Data from Pandas DF
    for i in range(1):
        rowPosition = (i - 1) // self.imageHeight
        columnPosition = (i - 1) % self.imageWidth
        indexFirst = self.dataFromCSV.iloc[index][i].split(";")[3]
        indexLast = self.dataFromCSV.iloc[index][i].split(";")[6]
        imageAsNumpy[rowPosition][columnPosition] = indexFirst + indexLast
    print('LABEL: ' + str(singleImageLabel))
    cv2.imshow('INPUT', imageAsNumpy)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Convert Image from Numpy Array to PIL Image, Mode 'L' is for Grayscale
    imageAsImage = Image.fromarray(imageAsNumpy)
    imageAsImage = imageAsImage.convert('1')
    # Transform Image to Tensor
    if self.transform is not None:
        imageAsTensor = self.transform(imageAsImage)
    # Transform Label to Tensor
    labelAsLabel = int(singleImageLabel.split(";")[7])
    labelAsTensor = torch.from_numpy(nP.array(labelAsLabel))
    # print ('Target: ' + str(labelAsTensor))
    # Return Image & the Label
    return (imageAsTensor, labelAsLabel)

def __len__(self):
    return len(self.dataFromCSV.index)
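The black INPUT window follows from the filling loop: range(1) runs only once and writes a single value into a 32x32 buffer of ones, so nearly every pixel stays black. As a hedged sketch (my own illustration, assuming each CSV row packs a readable image path in its first ';'-separated field and the label in field 7, as the question's splits suggest), __getitem__ could read the image from disk instead of rebuilding pixels from the CSV:

def __getitem__(self, index):
    singleImageLabel = self.dataLabels[index]
    imagePath = singleImageLabel.split(";")[0]
    # read the actual image in grayscale and resize it to the configured size
    imageAsNumpy = cv2.imread(imagePath, cv2.IMREAD_GRAYSCALE)
    imageAsNumpy = cv2.resize(imageAsNumpy, (self.imageWidth, self.imageHeight))
    imageAsImage = Image.fromarray(imageAsNumpy)   # PIL image, mode 'L'
    if self.transform is not None:
        imageAsImage = self.transform(imageAsImage)
    labelAsLabel = int(singleImageLabel.split(";")[7])
    return (imageAsImage, labelAsLabel)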