From the training set I took an image ('img') of size (3, 32, 32). I have used plt.imshow(img.T), but the image is not clear. What changes do I have to make to 'img' so that it is more clearly visible?
Thanks.
The following prints a 5x5 grid of random CIFAR-10 images. It isn't blurry, though not perfect either. Any suggestions welcome.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from six.moves import cPickle
f = open('data/cifar10/cifar-10-batches-py/data_batch_1', 'rb')
datadict = cPickle.load(f,encoding='latin1')
f.close()
X = datadict["data"]
Y = datadict['labels']
X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("uint8")
Y = np.array(Y)
#Visualizing CIFAR 10
fig, axes1 = plt.subplots(5,5,figsize=(3,3))
for j in range(5):
    for k in range(5):
        i = np.random.choice(range(len(X)))
        axes1[j][k].set_axis_off()
        axes1[j][k].imshow(X[i])
Make sure you don't normalize your dataset when you want to display the image.
Example:
The loader...
import torch
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('../data', train=True, download=True,
                     transform=transforms.Compose([
                         transforms.RandomHorizontalFlip(),
                         transforms.ToTensor(),
                         # transforms.Normalize(
                         #     (0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))
                     ])),
    batch_size=64, shuffle=True)
The code that shows the image...
img = next(iter(train_loader))[0][0]
plt.imshow(transforms.ToPILImage()(img))
Normalized vs. without normalization: the normalized tensor displays with distorted colors, while the un-normalized one shows the original image.
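If your pipeline does normalize (as in the commented-out transform above), a minimal sketch of undoing it just for display, assuming the same mean/std values:

import torch

# Undo the channel-wise normalization before display; mean/std must match the transform
mean = torch.tensor([0.4914, 0.4822, 0.4465]).view(3, 1, 1)
std = torch.tensor([0.247, 0.243, 0.261]).view(3, 1, 1)
img_display = (img * std + mean).clamp(0, 1)  # back to roughly [0, 1]
plt.imshow(transforms.ToPILImage()(img_display))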
This file reads the cifar10 dataset and plots individual images using matplotlib.
import _pickle as pickle
import argparse
import numpy as np
import os
import matplotlib.pyplot as plt
cifar10 = "./cifar-10-batches-py/"
parser = argparse.ArgumentParser("Plot training images in cifar10 dataset")
parser.add_argument("-i", "--image", type=int, default=0,
                    help="Index of the image in cifar10. In range [0, 49999]")
args = parser.parse_args()

def unpickle(file):
    with open(file, 'rb') as fo:
        data = pickle.load(fo, encoding='bytes')
    return data

def cifar10_plot(data, meta, im_idx=0):
    im = data[b'data'][im_idx, :]
    im_r = im[0:1024].reshape(32, 32)
    im_g = im[1024:2048].reshape(32, 32)
    im_b = im[2048:].reshape(32, 32)
    img = np.dstack((im_r, im_g, im_b))
    print("shape: ", img.shape)
    print("label: ", data[b'labels'][im_idx])
    print("category:", meta[b'label_names'][data[b'labels'][im_idx]])
    plt.imshow(img)
    plt.show()

def main():
    batch = (args.image // 10000) + 1
    idx = args.image - (batch-1)*10000
    data = unpickle(os.path.join(cifar10, "data_batch_" + str(batch)))
    meta = unpickle(os.path.join(cifar10, "batches.meta"))
    cifar10_plot(data, meta, im_idx=idx)

if __name__ == "__main__":
    main()
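Assuming the script is saved as plot_cifar.py (the file name is arbitrary), you can plot, for example, training image 12345 with:

python plot_cifar.py --image 12345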
The image is blurry due to interpolation. To prevent blurring in matplotlib, call imshow with keyword interpolation='nearest':
plt.imshow(img.T, interpolation='nearest')
Also, it appears that your x and y axes are being swapped when you use the transpose, so you may want to display like this instead:
plt.imshow(np.transpose(img, (1, 2, 0)), interpolation='nearest')
I have used the following code to show all the CIFAR data as one big image. The code shows the image, but if you want to save it without blur I suggest plt.savefig(fname, format='png', dpi=1000).
import numpy as np
import matplotlib.pyplot as plt
def reshape_and_print(cifar_data):
    # Number of images in rows and columns
    rows = cols = np.sqrt(cifar_data.shape[0]).astype(np.int32)
    # Image height and width. Divide by 3 because of 3 color channels
    imh = imw = np.sqrt(cifar_data.shape[1] // 3).astype(np.int32)
    # Reshape to number of images X color channels X image size,
    # then transpose to color channels X number of images X image size
    timg = cifar_data.reshape(rows * cols, 3, imh * imw).transpose(1, 0, 2)
    # Reshape to color channels X rows X cols X image height X image width,
    # then swap axes to color channels X rows X image height X cols X image width
    timg = timg.reshape(3, rows, cols, imh, imw).swapaxes(2, 3)
    # Reshape to color channels X combined image height X combined image width,
    # then transpose to combined image height X combined image width X color channels
    timg = timg.reshape(3, rows * imh, cols * imw).transpose(1, 2, 0)
    plt.imshow(timg)
    plt.show()
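A usage sketch, assuming a CIFAR-10 batch loaded with the pickle code from earlier answers (the path is an assumption), and remembering that the number of images must be a perfect square:

import pickle
with open('data/cifar10/cifar-10-batches-py/data_batch_1', 'rb') as f:
    batch = pickle.load(f, encoding='latin1')
reshape_and_print(batch['data'][:100] / 255.0)  # 100 images -> 10x10 mosaic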
I made a quick data helper class that I used for a small test project; I hope it can be useful:
import gzip
import pickle
import numpy as np
import matplotlib.pyplot as plt
class DataSet(object):

    def __init__(self, seed=42, setsize=10000):
        self.seed = seed
        # Set the seed for reproducibility
        np.random.seed(seed)
        # Load the data
        train_set, test_set = self.load_data()
        self.split_data(train_set, test_set, setsize)

    def split_data(self, data_set, test_set, split_size):
        permutation = np.random.permutation(data_set.shape[0])
        self.train = data_set[permutation[:split_size]]
        self.valid = data_set[permutation[split_size:split_size * 2]]
        self.test = test_set[:split_size]

    def reshape_for_print(self, data):
        raise NotImplementedError

    def load_data(self):
        raise NotImplementedError

    def show_all_imgs(self, data):
        raise NotImplementedError
class CIFAR(DataSet):

    def load_data(self):
        # try to load data
        with open('./data/cifar-100-python/train', 'rb') as f:
            data = pickle.load(f, encoding='latin1')
        train_set = data['data'].astype(np.float32) / 255.0
        with open('./data/cifar-100-python/test', 'rb') as f:
            data = pickle.load(f, encoding='latin1')
        test_set = data['data'].astype(np.float32) / 255.0
        return train_set, test_set

    def reshape_for_print(self, data):
        gh = gw = np.sqrt(data.shape[0]).astype(np.int32)
        imh = imw = np.sqrt(data.shape[1] // 3).astype(np.int32)
        timg = data.reshape(gh * gw, 3, imh * imw).transpose(1, 0, 2)
        timg = timg.reshape(3, gh, gw, imh, imw).swapaxes(2, 3)
        timg = timg.reshape(3, gh * imh, gw * imw).transpose(1, 2, 0)
        return timg

    def show_all_imgs(self, data):
        timg = self.reshape_for_print(data)
        plt.imshow(timg)
        plt.show()
class MNIST(DataSet):

    def load_data(self):
        # try to load data
        with gzip.open('./data/mnist.pkl.gz', 'rb') as f:
            train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
        return train_set[0], test_set[0]

    def reshape_for_print(self, data):
        gh = gw = np.sqrt(data.shape[0]).astype(np.int32)
        imh = imw = np.sqrt(data.shape[1]).astype(np.int32)
        timg = data.reshape(gh, gw, imh, imw).swapaxes(1, 2)
        timg = timg.reshape(gh * imh, gw * imw)
        return timg

    def show_all_imgs(self, data):
        timg = self.reshape_for_print(data)
        plt.imshow(timg, cmap=plt.cm.gray)
        plt.show()
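A usage sketch, assuming the data files sit at the hard-coded paths above and that you pass a square number of images:

cifar = CIFAR()
cifar.show_all_imgs(cifar.train[:400])  # 400 = 20x20 grid

mnist = MNIST()
mnist.show_all_imgs(mnist.train[:400])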
I made a function to plot the RGB image from a row in the CIFAR-10 dataset. The image will be blurry at best since the original size of the image is very small (32px x 32px).
import pickle
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

def unpickle(file):
    with open(file, 'rb') as fo:
        dict1 = pickle.load(fo, encoding='bytes')
    return dict1

pd_tr = pd.DataFrame()
tr_y = pd.DataFrame()

for i in range(1, 6):
    data = unpickle('data/data_batch_' + str(i))
    # DataFrame.append was removed in pandas 2.0, so concatenate instead
    pd_tr = pd.concat([pd_tr, pd.DataFrame(data[b'data'])], ignore_index=True)
    tr_y = pd.concat([tr_y, pd.DataFrame(data[b'labels'])], ignore_index=True)

pd_tr['labels'] = tr_y

tr_x = np.asarray(pd_tr.iloc[:, :3072])
tr_y = np.asarray(pd_tr['labels'])
ts_x = np.asarray(unpickle('data/test_batch')[b'data'])
ts_y = np.asarray(unpickle('data/test_batch')[b'labels'])
labels = unpickle('data/batches.meta')[b'label_names']

def plot_CIFAR(ind):
    arr = tr_x[ind]
    R = arr[0:1024].reshape(32, 32) / 255.0
    G = arr[1024:2048].reshape(32, 32) / 255.0
    B = arr[2048:].reshape(32, 32) / 255.0
    img = np.dstack((R, G, B))
    # Strip the b'...' bytes artifacts from the label name
    title = re.sub('[!##$b]', '', str(labels[tr_y[ind]]))
    fig = plt.figure(figsize=(3, 3))
    ax = fig.add_subplot(111)
    ax.imshow(img, interpolation='bicubic')
    ax.set_title('Category = ' + title, fontsize=15)

plot_CIFAR(4)
Try using:
import matplotlib.pyplot as plt
from scipy.misc import toimage
plt.imshow(toimage(img))
I am not 100% sure how the code works, but I think that because the images are stored as floating-point numpy arrays, the imshow() function has a difficult time mapping them to the right colors. By converting them with toimage() you get the proper image format that imshow() expects, i.e. a PIL image rather than a raw array.
This code works for me every time I want to display images in Python.
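Note that scipy.misc.toimage was deprecated and removed in SciPy 1.2, so on current versions the same effect can be had with Pillow; a minimal sketch, assuming img is an H x W x 3 float array in [0, 1]:

import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

# Scale to 0-255 and convert to a PIL image for display
plt.imshow(Image.fromarray((img * 255).astype(np.uint8)))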
Try the code below.
I found a very useful link about visualization of MNIST and CIFAR images; you can find code for various datasets there:
https://machinelearningmastery.com/how-to-load-and-visualize-standard-computer-vision-datasets-with-keras/
The CIFAR-10 code is below. It works well.
# example of loading the cifar10 dataset
from matplotlib import pyplot
from keras.datasets import cifar10
# load dataset
(trainX, trainy), (testX, testy) = cifar10.load_data()
# summarize loaded dataset
print('Train: X=%s, y=%s' % (trainX.shape, trainy.shape))
print('Test: X=%s, y=%s' % (testX.shape, testy.shape))
# plot first few images
for i in range(9):
    # define subplot
    pyplot.subplot(330 + 1 + i)
    # plot raw pixel data
    pyplot.imshow(trainX[i])
# show the figure
pyplot.show()
If the tensor was normalized into the range [-0.5, 0.5], add 0.5 to shift it back to [0, 1] for display:
plt.imshow(np.transpose(img, (1, 2, 0)) + 0.5)
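If the normalization used per-channel mean/std rather than a plain 0.5 shift, values can still land outside [0, 1]; clipping keeps matplotlib from complaining. A sketch, assuming img is a NumPy array in CHW layout:

plt.imshow(np.clip(np.transpose(img, (1, 2, 0)) + 0.5, 0, 1))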
random_vector = tf.random.normal(shape=(25, latent_dim))
I am training my model with the above random vector and saving the outputs as a 5x5 grid in a .jpg file. But since my dataset has 60k images I am unable to find the corresponding input images.
My question is how can I save the random_vector as a 5x5 grid. Here is the code I used to save output from my model:
def save_images(model, epoch, step, input_):
    prediction = model.predict(input_)
    fig, axes = plt.subplots(5, 5, figsize=(14, 14))
    idx = 0
    for row in range(5):
        for column in range(5):
            image = prediction[idx] * 255
            image = image.astype("int32")
            axes[row, column].imshow(image)
            axes[row, column].axis("off")
            idx += 1
    output_path = "./anime-faces/"
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    plt.savefig(output_path + "Epoch_{:04d}_step_{:04d}.jpg".format(epoch, step))
    plt.close()
I am new to TensorFlow and AI/ML, so most of the code is written from different sources on the internet; sorry if it is something obvious.
You could use similar code to what you are using for saving the output images, applied to the input_ that you feed into save_images, giving:
fig, axes = plt.subplots(5, 5, figsize=(14, 14))
idx = 0
for row in range(5):
    for column in range(5):
        image = input_[idx] * 255
        image = image.astype("int32")
        axes[row, column].imshow(image)
        axes[row, column].axis("off")
        idx += 1
output_path = "./anime-faces-inputs/"
if not os.path.exists(output_path):
    os.mkdir(output_path)
plt.savefig(os.path.join(output_path, "inputs.jpg"))  # file name is arbitrary
plt.close()
My dataset is composed of image patches obtained from the original image (face patches and random outside-of-face patches). The patches are stored in a folder named after the original image from which they originate. I created my own Dataset and DataLoader, but when I iterate over the dataset, the data is not returned in batches. A batch of size 1 should include an array of tuples of patches and a label, so with an increased batch size we should get an array of arrays of tuples with labels. But the DataLoader returns only one array of tuples no matter the batch size.
My dataset:
import os
import cv2 as cv
import PIL.Image as Image
import torchvision.transforms as Transforms
from torch.utils.data import dataset, dataloader
class PatchDataset(dataset.Dataset):

    def __init__(self, img_folder, n_patches):
        self.img_folder = img_folder
        self.n_patches = n_patches
        self.img_names = sorted(os.listdir(img_folder))
        self.transform = Transforms.Compose([
            Transforms.Resize((50, 50)),
            Transforms.ToTensor()
        ])

    def __len__(self):
        return len(self.img_names)

    def __getitem__(self, idx):
        img_name = self.img_names[idx]
        patch_dir = os.path.join(self.img_folder, img_name)
        patches = []
        for i in range(self.n_patches):
            face_patch = cv.imread(os.path.join(patch_dir, f'{str(i)}_face.png'))
            face_patch = cv.cvtColor(face_patch, cv.COLOR_BGR2RGB)
            face_patch = Image.fromarray(face_patch)
            face_patch = self.transform(face_patch)
            patch = cv.imread(os.path.join(patch_dir, f'{str(i)}_patch.png'))
            patch = cv.cvtColor(patch, cv.COLOR_BGR2RGB)
            patch = Image.fromarray(patch)
            patch = self.transform(patch)
            patches.append((face_patch, patch))
        return patches, int(img_name.split('-')[0])
Then I use it as such:
X = PatchDataset(PATCHES_DIR, 9)
train_dl = dataloader.DataLoader(
    X,
    batch_size=10,
    drop_last=True
)
for batch_X, batch_Y in train_dl:
    print(len(batch_X))
    print(len(batch_Y))
In the provided case the batch size is 10, so printing batch_Y returns the correct number (10). But printing batch_X returns 9, which is the number of patch pairs; the loader returns only one sample's structure from the dataset instead of a batch of 10 samples, each of length 9.
You should return a tensor with one extra dimension instead of a list of tuples of tensors from the __getitem__ call, so that the default collate function can batch it: stack each (face_patch, patch) pair into a tensor, then stack the pairs with torch.stack (this requires import torch in the dataset module).
def __getitem__(self, idx):
    img_name = self.img_names[idx]
    patch_dir = os.path.join(self.img_folder, img_name)
    patches = []
    for i in range(self.n_patches):
        face_patch = cv.imread(os.path.join(patch_dir, f'{str(i)}_face.png'))
        face_patch = cv.cvtColor(face_patch, cv.COLOR_BGR2RGB)
        face_patch = Image.fromarray(face_patch)
        face_patch = self.transform(face_patch)
        patch = cv.imread(os.path.join(patch_dir, f'{str(i)}_patch.png'))
        patch = cv.cvtColor(patch, cv.COLOR_BGR2RGB)
        patch = Image.fromarray(patch)
        patch = self.transform(patch)
        # Stack the pair so every list element is a tensor of shape (2, C, H, W)
        patches.append(torch.stack((face_patch, patch)))
    # Shape (n_patches, 2, C, H, W); the default collate adds the batch dimension
    return torch.stack(patches), int(img_name.split('-')[0])
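With that change the default collate function can stack samples along a new batch dimension. A quick sanity check might look like this (shapes assume the 50x50 RGB patches produced by the transform above):

for batch_X, batch_Y in train_dl:
    print(batch_X.shape)  # torch.Size([10, 9, 2, 3, 50, 50]): batch, patches, pair, C, H, W
    print(batch_Y.shape)  # torch.Size([10])
    break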
I have code for finding the entropy of an image by reshaping the image and dividing it into its RGB channels.
import os
from PIL import Image
import numpy as np
from scipy.misc import imread
import cv2
import imageio
# Read an RGB image
def openRGB(image_path):
    f = open(image_path, "rb")
    data = f.read()
    f.close()
    data = [int(x) for x in data]
    data = np.array(data).reshape((256*256, 3)).astype(np.uint8)
    return data

def entropy(X):
    n = len(X)
    counts = np.bincount(X)
    probs = counts[np.nonzero(counts)] / n
    en = 0
    for i in range(len(probs)):
        en = en - probs[i] * np.log(probs[i]) / np.log(2)
    return en

def getEntropy(image_path):
    data = openRGB(image_path)
    data_B = data[:, 0]
    data_G = data[:, 1]
    data_R = data[:, 2]
    B = entropy(data_B)
    G = entropy(data_G)
    R = entropy(data_R)
    return (R + B + G) / 2
However, whenever I run the getEntropy() function on a given image it keeps giving back this error:
ValueError: cannot reshape array of size 37048 into shape (65536,3)
Any idea how I can reformat the image to fit that array shape?
There is a simple explanation: the file at image_path is only 37048 bytes long.
When using np.array(data).reshape((256*256, 3)), the length of data must be 256*256*3 = 196608 bytes.
You are getting an exception because the lengths do not match.
It's simple to reproduce the problem.
Create an input sample file in size 196608 bytes, and there is no exception.
Create an input sample file in size 37048 bytes, get an exception.
Here is a code sample that reproduces the problem:
import os
#from PIL import Image
import numpy as np
#from scipy.misc import imread
#import cv2
#import imageio
def openRGB(image_path):
    f = open(image_path, "rb")
    data = f.read()
    f.close()
    data = [int(x) for x in data]
    data = np.array(data).reshape((256*256, 3)).astype(np.uint8)
    return data

def entropy(X):
    n = len(X)
    counts = np.bincount(X)
    probs = counts[np.nonzero(counts)] / n
    en = 0
    for i in range(len(probs)):
        en = en - probs[i] * np.log(probs[i]) / np.log(2)
    return en

def getEntropy(image_path):
    data = openRGB(image_path)
    data_B = data[:, 0]
    data_G = data[:, 1]
    data_R = data[:, 2]
    B = entropy(data_B)
    G = entropy(data_G)
    R = entropy(data_R)
    return (R + B + G) / 2

# Create a binary file with random bytes
image_path = 'tmp.bin'

# When n_bytes=196608, there is no exception.
################################################################################
n_bytes = 256*256*3
tmp = np.random.randint(0, 255, n_bytes, np.uint8)  # Build random array of n_bytes bytes
with open(image_path, 'wb') as f:
    f.write(tmp)  # Write tmp to a binary file

file_size_in_bytes = os.path.getsize(image_path)
print('file_size_in_bytes = ' + str(file_size_in_bytes))

res = getEntropy(image_path)
print(res)
################################################################################

# When n_bytes=37048, an exception is raised: ValueError: cannot reshape array of size 37048 into shape (65536,3)
################################################################################
n_bytes = 37048
tmp = np.random.randint(0, 255, n_bytes, np.uint8)  # Build random array of n_bytes bytes
with open(image_path, 'wb') as f:
    f.write(tmp)  # Write tmp to a binary file

file_size_in_bytes = os.path.getsize(image_path)
print('file_size_in_bytes = ' + str(file_size_in_bytes))

res = getEntropy(image_path)
print(res)
################################################################################
Why are you reading 37048 bytes instead of 196608 bytes?
image_path is a JPEG image file, and you are reading the image using f = open(image_path, "rb") and data = f.read().
You may read and reshape the image as follows:
import cv2

def openRGB(image_path):
    # For example: image_path = 'img.jpg'
    img = cv2.imread(image_path)  # Read image in BGR color format.
    data = np.array(img).reshape(img.shape[0]*img.shape[1], 3)  # Reshape to (rows*cols) x 3: blue in data[:, 0], green in data[:, 1], red in data[:, 2].
    return data
In the above example I used img.shape[0]*img.shape[1]; the image resolution is:
height = img.shape[0]
width = img.shape[1]
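A usage sketch with the fixed reader (the file name is hypothetical); getEntropy from the question works unchanged once it calls this openRGB:

data = openRGB('img.jpg')
print(data.shape)  # (height * width, 3), BGR channel order
print(getEntropy('img.jpg'))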
I was trying to use a voice emotion detection model from GitHub HERE. Based on their examples, I was able to implement the following code to predict the final emotion of an audio file as a single prediction. It looks like it makes sub-predictions for each 0.4 s window in the audio file and then takes the maximum occurrence as the final output (here is the sample file I used).
How can I change it to print a prediction for every 1 s chunk of the audio file (as opposed to a single value for the whole file)?
import numpy as np
import pandas as pd
import librosa
from tqdm import tqdm
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dropout, Dense
from sklearn.utils.class_weight import compute_class_weight
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
# Create a configuration class to help if I want to change parameters later
class Config:
    def __init__(self, n_mfcc=26, n_feat=13, n_fft=552, sr=22050, window=0.4, test_shift=0.1):
        self.n_mfcc = n_mfcc
        self.n_feat = n_feat
        self.n_fft = n_fft
        self.sr = sr
        self.window = window
        self.step = int(sr * window)
        self.test_shift = test_shift
        self.shift = int(sr * test_shift)

config = Config()
model = pickle.load(open('cnn_ep25_mfccOnly_moreData.pkl', 'rb'))
wav, sr = librosa.load('YAF_chain_angry.wav')
all_results = []
# Initialize a local results list
local_results = []
# Initialize min and max values for each file for scaling
_min, _max = float('inf'), -float('inf')
# Load the file
# Create an array to hold features for each window
X = []
# Iterate over sliding 0.4s windows of the audio file
for i in range(int((wav.shape[0]/sr-config.window)/config.test_shift)):
    X_sample = wav[i*config.shift: i*config.shift + config.step]  # slice out 0.4s window
    X_mfccs = librosa.feature.mfcc(X_sample, sr, n_mfcc=config.n_mfcc, n_fft=config.n_fft,
                                   hop_length=config.n_fft)[1:config.n_feat + 1]  # generate mfccs from sample
    _min = min(np.amin(X_mfccs), _min)
    _max = max(np.amax(X_mfccs), _max)  # check min and max values
    X.append(X_mfccs)  # add features of window to X
# Put window data into array, scale, then reshape
X = np.array(X)
X = (X - _min) / (_max - _min)
X = X.reshape(X.shape[0], X.shape[1], X.shape[2], 1)
# Feed data for each window into model for prediction
for i in range(X.shape[0]):
    window = X[i].reshape(1, X.shape[1], X.shape[2], 1)
    local_results.append(model.predict(window))
# Aggregate predictions for file into one then append to all_results
local_results = (np.sum(np.array(local_results), axis = 0)/len(local_results))[0]
local_results = list(local_results)
prediction = np.argmax(local_results)
# Turn all results into a dataframe
df_cols = ['neutral', 'happy', 'sad', 'angry', 'fearful', 'disgusted', 'surprised']
print(df_cols)
print(local_results)
print("Prediction: "+ df_cols[prediction])
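As for the stated question (one prediction per 1 s chunk rather than one for the whole file), a rough sketch, not tested against the linked model: keep the raw per-window outputs, then group the 0.4 s windows by the second in which each one starts and average within each group:

window_preds = np.array([model.predict(X[i].reshape(1, X.shape[1], X.shape[2], 1))[0]
                         for i in range(X.shape[0])])
starts = np.arange(window_preds.shape[0]) * config.test_shift  # start time of each window in seconds
for sec in range(int(starts[-1]) + 1):
    mask = (starts >= sec) & (starts < sec + 1)
    if mask.any():
        chunk = window_preds[mask].mean(axis=0)  # average the windows starting in this second
        print("%d-%ds: %s" % (sec, sec + 1, df_cols[int(np.argmax(chunk))]))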
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
data = pd.read_csv('fer2013.csv')
data.head()
face1 = np.fromstring(data['pixels'][0], dtype=int, sep=' ')
exp1 = np.zeros((48,48))
k = 0
for i in range(len(exp1)):
    for j in range(len(exp1[0])):
        exp1[i][j] = face1[k]
        k = k + 1
imgplot = plt.imshow(exp1, cmap="Greys_r")
plt.show()
mpimg.imsave('save.png', exp1)
The images are 48 x 48 pixels represented as a string ("12 34 12 34 55 ... "). So the first value in the string corresponds to the first pixel value.
Hence, my question is: how do I convert the string of space-separated pixel values into columns of features that I can use to train an SVM classifier, and why is the image not greyscale? The training part I can do myself.
There are 35887 training examples denoting 7 different expressions, so I need an efficient way of doing this.
P.S. The problem originated from attempting Challenges in Representation Learning: Facial Expression Recognition Challenge (Kaggle.com)
You should show the current attempts/research you've done to solve the problem when posting questions on SO.
You can load an image in Python easily using OpenCV; the result img is a numpy array, so you can just print it as a string, e.g.:
import numpy as np
import cv2
# Load image as grayscale (flag 0)
img = cv2.imread('image.jpg', 0)
print(img)
Update after question revision:
If you want to just convert the string of numbers to an image, you can use something like the following:
import numpy as np
image = '1 2 3 4 5 6'
image_width, image_height = 2, 3
result = np.fromstring(image, dtype=int, sep=" ").reshape((image_height, image_width))
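On the greyscale part of the question: a 2-D array is drawn through a colormap, and mpimg.imsave applies the default (non-grey) colormap unless you pass one explicitly, which is likely why the saved file is not greyscale. A sketch against the question's code:

mpimg.imsave('save.png', exp1, cmap='gray')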
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn import svm, metrics
#Read csv file
data = pd.read_csv('fer2013.csv')
#Number of samples
n_samples = len(data)
n_samples_train = 28709
n_samples_test = 3589
n_samples_validation = 3589
#Pixel width and height
w = 48
h = 48
#Separating labels and features respectively
y = data['emotion']
X = np.zeros((n_samples, w, h))
for i in range(n_samples):
    X[i] = np.fromstring(data['pixels'][i], dtype=int, sep=' ').reshape(w, h)
#Training set
X_train = X[:n_samples_train].reshape(n_samples_train, -1)
y_train = y[:n_samples_train]
#Classifier
clf = svm.SVC(gamma=0.001, kernel='rbf', class_weight='balanced')
print('Training Classifier...')
clf.fit(X_train, y_train)
print('Done!!!')
#Testing set
X_test = X[n_samples_train : (n_samples_train + n_samples_test)].reshape(n_samples_test, -1)
y_test = y[n_samples_train : (n_samples_train + n_samples_test)]
#Prediction
expected = y_test
predicted = clf.predict(X_test)
#Results
print("Classification report for classifier %s:\n%s\n" % (clf, metrics.classification_report(expected, predicted)))
Here is my solution! Kindly let me know if certain things can be done more efficiently. Thank you mark and tom for all your help.
import pandas as pd
import numpy as np
import cv2
from sklearn.model_selection import train_test_split

dataset_path = './fer2013/fer2013.csv'
image_size = (48, 48)

def load_fer2013():
    data = pd.read_csv(dataset_path)
    pixels = data['pixels'].tolist()
    width, height = 48, 48
    faces = []
    for pixel_sequence in pixels:
        face = [int(pixel) for pixel in pixel_sequence.split(' ')]
        face = np.asarray(face).reshape(width, height)
        face = cv2.resize(face.astype('uint8'), image_size)
        faces.append(face.astype('float32'))
    faces = np.asarray(faces)
    faces = np.expand_dims(faces, -1)
    # as_matrix() was removed in newer pandas; to_numpy() is the equivalent
    emotions = pd.get_dummies(data['emotion']).to_numpy()
    return faces, emotions

faces, emotions = load_fer2013()
xtrain, xtest, ytrain, ytest = train_test_split(faces, emotions, test_size=0.2, shuffle=True)
This code is very simple: it loops over the pixel rows in the CSV file, converts the space-separated pixel values of each row to ints, reshapes them into a face image with NumPy and cv2, and stacks all the faces in a list.