Iterate over an audio file with Python's librosa

I was trying to use a voice emotion detection model from GitHub HERE. Based on their examples, I was able to implement the following code to predict the final emotion of an audio file as a single prediction. It looks like it makes sub-predictions for each 0.4s window in the audio file, and then takes the maximum occurrence as the final output (here is the sample file I used).
How can I change it to print a prediction for every 1s chunk of the audio file (as opposed to a single value for the whole file)?
import numpy as np
import pandas as pd
import librosa
from tqdm import tqdm
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Conv2D, MaxPool2D, Flatten, Dropout, Dense
from sklearn.utils.class_weight import compute_class_weight
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
# Create a configuration class to help if I want to change parameters later
class Config:
    def __init__(self, n_mfcc = 26, n_feat = 13, n_fft = 552, sr = 22050, window = 0.4, test_shift = 0.1):
        self.n_mfcc = n_mfcc
        self.n_feat = n_feat
        self.n_fft = n_fft
        self.sr = sr
        self.window = window
        self.step = int(sr * window)
        self.test_shift = test_shift
        self.shift = int(sr * test_shift)
config = Config()
# Load the trained model and the audio file
model = pickle.load(open('cnn_ep25_mfccOnly_moreData.pkl', 'rb'))
wav, sr = librosa.load('YAF_chain_angry.wav')
all_results = []
# Initialize a local results list
local_results = []
# Initialize min and max values for each file for scaling
_min, _max = float('inf'), -float('inf')
# Create an array to hold features for each window
X = []
# Iterate over sliding 0.4s windows of the audio file
for i in range(int((wav.shape[0]/sr-config.window)/config.test_shift)):
    X_sample = wav[i*config.shift: i*config.shift + config.step]  # slice out 0.4s window
    X_mfccs = librosa.feature.mfcc(y=X_sample, sr=sr, n_mfcc=config.n_mfcc, n_fft=config.n_fft,
                                   hop_length=config.n_fft)[1:config.n_feat + 1]  # generate MFCCs from the sample
    _min = min(np.amin(X_mfccs), _min)
    _max = max(np.amax(X_mfccs), _max)  # track min and max values
    X.append(X_mfccs)  # add features of window to X
# Put window data into array, scale, then reshape
X = np.array(X)
X = (X - _min) / (_max - _min)
X = X.reshape(X.shape[0], X.shape[1], X.shape[2], 1)
# Feed data for each window into model for prediction
for i in range(X.shape[0]):
    window = X[i].reshape(1, X.shape[1], X.shape[2], 1)
    local_results.append(model.predict(window))
# Aggregate predictions for file into one then append to all_results
local_results = (np.sum(np.array(local_results), axis = 0)/len(local_results))[0]
local_results = list(local_results)
prediction = np.argmax(local_results)
# Class labels corresponding to the model's output indices
df_cols = ['neutral', 'happy', 'sad', 'angry', 'fearful', 'disgusted', 'surprised']
print(df_cols)
print(local_results)
print("Prediction: "+ df_cols[prediction])

Related

normalizing mel spectrogram to unit peak amplitude?

I am new to both python and librosa. I am trying to follow this method for a speech recognizer: acoustic front end
My code:
import librosa
import librosa.display
import numpy as np
y, sr = librosa.load('test.wav', sr = None)
normalizedy = librosa.util.normalize(y)
stft = librosa.core.stft(normalizedy, n_fft = 256, hop_length=16)
mel = librosa.feature.melspectrogram(S=stft, n_mels=32)
melnormalized = librosa.util.normalize(mel)
mellog = np.log(melnormalized) - np.log(10**-5)
The problem is that when I apply librosa.util.normalize to the variable mel, I expect the values to be between -1 and 1, but they aren't. What am I missing?
If you want your output to be log-scaled and normalized to between -1 and +1, you should log-scale first, then normalize:
import librosa
import librosa.display
import numpy as np
y, sr = librosa.load('test.wav', sr = None)
normalizedy = librosa.util.normalize(y)
stft = librosa.core.stft(normalizedy, n_fft = 256, hop_length=16)
power = np.abs(stft) ** 2  # melspectrogram expects a power spectrogram, not a complex STFT
mel = librosa.feature.melspectrogram(S=power, n_mels=32)
mellog = np.log(mel + 1e-9)
melnormalized = librosa.util.normalize(mellog)
# use melnormalized
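As an aside, librosa also provides librosa.power_to_db for the log-scaling step; a sketch of using it in place of the manual np.log (assuming mel is a power mel spectrogram as above):
mellog = librosa.power_to_db(mel)  # 10 * log10(mel / ref), clipped at top_db below the peak
melnormalized = librosa.util.normalize(mellog)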

Python: How to reshape an array based on image input?

I have a function which applies masking operation on the input images as follows:
file_names = glob(os.path.join(IMAGE_DIR, "*.jpg"))
masks_prediction = np.zeros((2000, 2000, len(file_names)))
for i in range(len(file_names)):
    print(i)
    image = skimage.io.imread(file_names[i])
    predictions = model.detect([image], verbose=1)
    p = predictions[0]
    masks = p['masks']
    merged_mask = np.zeros((masks.shape[0], masks.shape[1]))
    for j in range(masks.shape[2]):
        merged_mask[masks[:,:,j]==True] = True
    masks_prediction[:,:,i] = merged_mask
print(masks_prediction.shape)
So basically it reads all the images from the directory, creates a mask for each and runs the detection.
However, since the images are of different sizes, it does not work:
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-10-764e6229811a> in <module>()
     10     for j in range(masks.shape[2]):
     11         merged_mask[masks[:,:,j]==True] = True
---> 12     masks_prediction[:,:,i] = merged_mask
     13     print(masks_prediction.shape)
ValueError: could not broadcast input array from shape (1518,1077) into shape (2000,2000)
I was thinking of a way to know the size of each image before the mask operation is applied (before line 12 in the error message), so that I can pass the exact image shape to the masking operation.
Is this somehow possible in Python?
EDIT: So apparently people somehow didn't get what I wanted to achieve - although I genuinely believe it was written in a very simple way. Nevertheless here is the entire code (copied from ipython notebook) where the function is located:
import os
import sys
import random
import math
import re
import time
import numpy as np
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import skimage.draw
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
from glob import glob
import components
%matplotlib inline
# Directories to be referred
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
IMAGE_DIR = os.path.join(ROOT_DIR, "datasets/components/back/predict")
ANNOTATION_DIR = os.path.join(ROOT_DIR, "datasets/components/front/")
WEIGHTS_PATH = os.path.join(ROOT_DIR, "logs/back/mask_rcnn_components_0100.h5")
config = components.ComponentsConfig()
# Override the training configurations with a few
# changes for inferencing.
class InferenceConfig(config.__class__):
    # Run detection on one image at a time
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model in inference mode
with tf.device(DEVICE):
    model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
                              config=config)
# Load weights
print("Loading weights ", WEIGHTS_PATH)
model.load_weights(WEIGHTS_PATH, by_name=True)
file_names = glob(os.path.join(IMAGE_DIR, "*.jpg"))
masks_prediction = np.zeros((2000, 2000, len(file_names)))
for i in range(len(file_names)):
    print(i)
    image = skimage.io.imread(file_names[i])
    predictions = model.detect([image], verbose=1)
    p = predictions[0]
    masks = p['masks']
    merged_mask = np.zeros((masks.shape[0], masks.shape[1]))
    for j in range(masks.shape[2]):
        merged_mask[masks[:,:,j]==True] = True
    masks_prediction[:,:,i] = merged_mask
print(masks_prediction.shape)
dataset = components.ComponentsDataset()
dataset.load_components(ANNOTATION_DIR, "predict")
accuracy = 0
precision = 0
for image_id in range(len(dataset.image_info)):
    name = dataset.image_info[image_id]['id']
    file_name = os.path.join(IMAGE_DIR, name)
    image_id_pred = file_names.index(file_name)
    merged_mask = masks_prediction[:, :, image_id_pred]
    annotated_mask = dataset.load_mask(image_id)[0]
    merged_annotated_mask = np.zeros((510, 510))
    for i in range(annotated_mask.shape[2]):
        merged_annotated_mask[annotated_mask[:,:,i]==True] = True
    accuracy += np.sum(merged_mask==merged_annotated_mask) / (1200 * 1600)
    all_correct = np.sum(merged_annotated_mask[merged_mask == 1])
    precision += all_correct / (np.sum(merged_mask))
print('accuracy:{}'.format(accuracy / len(file_names)))
print('precision:{}'.format(precision / len(file_names)))
file_names = glob(os.path.join(IMAGE_DIR, "*.jpg"))
class_names = ['BG', 'screw', 'lid']
test_image = skimage.io.imread(file_names[random.randint(0,len(file_names)-1)])
predictions = model.detect([test_image], verbose=1) # We are replicating the same image to fill up the batch_size
p = predictions[0]
visualize.display_instances(test_image, p['rois'], p['masks'], p['class_ids'],
                            class_names, p['scores'])
The image is just a numpy array. So to answer your question "is it possible to know the size of each image": Yes, simply use the shape of the image.
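For instance, a minimal sketch (reusing model and file_names from the question; the list-based storage is my suggestion, not the poster's code) that keeps one merged mask per image at that image's own size:
masks_prediction = []  # a list instead of a fixed (2000, 2000, N) array
for file_name in file_names:
    image = skimage.io.imread(file_name)
    p = model.detect([image], verbose=1)[0]
    # allocate the merged mask with this image's own shape
    merged_mask = np.zeros(p['masks'].shape[:2], dtype=bool)
    for j in range(p['masks'].shape[2]):
        merged_mask |= p['masks'][:, :, j]
    masks_prediction.append(merged_mask)  # each entry keeps its own shape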
If you are working on many images of different sizes, it might make sense to resize them to a uniform resolution.
skimage has a built-in functionality for that, the skimage.transform.resize method.
Look at the docs here.
If you use resize, you should make sure that no artifacts are introduced to your images. Check the result of the resizing operation before you use it.
skimage's resize is fairly slow. If you need more performance, you could use OpenCV. It has a great Python API, and since there is a conda package, installation has become really easy.
from skimage.transform import resize  # needed for the resize call below
resized_images = []
file_names = glob(os.path.join(IMAGE_DIR, "*.jpg"))
for i in range(len(file_names)):
    print("Resizing: " + str(i))
    image = skimage.io.imread(file_names[i])
    image_resized = resize(image, (1200, 800), anti_aliasing=True)
    resized_images.append(image_resized)
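For comparison, a rough OpenCV version of the same resize (assuming opencv-python is installed; note that cv2.resize takes the target size as (width, height), the reverse of skimage):
import cv2
# INTER_AREA is a reasonable choice for downscaling
image_resized = cv2.resize(image, (800, 1200), interpolation=cv2.INTER_AREA)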

How to fit a cubic equation with pybrain

I am trying to fit a neural network to a cubic equation, but after many tries changing the number of neurons in the hidden layer and increasing the number of epochs, I could only get this:
Could you guys help me on this?
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import TanhLayer
from pybrain.supervised.trainers import BackpropTrainer
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Preparing Data
dataset = pd.read_csv(r"C:\Users\gugub\Documents\projects\Cyrus\graph.data", header = None)  # raw string so the backslashes aren't treated as escapes
darray = np.array(dataset)
x = []
y = []
ds = SupervisedDataSet(1,1)
#Preparing True inputs and True Outputs
for i in range(43):
    e1 = darray[i,0]
    s1 = darray[i,1]
    x.append(e1)
    y.append(s1)
print(x)
print(y)
ds = SupervisedDataSet(1,1)
for i in range(43):
    ds.addSample(x[i],y[i])
print(ds)
net = buildNetwork(ds.indim,30,ds.outdim,recurrent=True)
trainer = BackpropTrainer(net,learningrate=0.01,verbose=True)
trainer.trainOnDataset(ds,20000)
trainer.testOnData(verbose=True)
y1 = []
for i in x:
    y1.append(net.activate(i))
plt.plot(x,y,'r')
plt.plot(x,y1,'b')
plt.show()
while True:
    e2 = int(raw_input(">"))
    s2 = [e2]
    print(net.activate(s2))
P.S. The red line on the graph is what it should be, and the blue one is the function generated by my network.

Storing different types of data in a 2D numpy array

I would like to know how I can store different kinds of data in a numpy array, in order to feed it to a machine-learning SVC algorithm.
My goal is to get a dataframe of size (samples * features), like this:
With:
Feature 1 in gray containing a list of size n
Feature 2 in red, containing a 2D numpy array of shape (i,k)
Feature ... something else (array for pwelch spectrum, integers, floats, ...)
Feature n in blue, containing an integer.
How can I do that in Python? Is this going to be OK for sklearn?
Here is the current error from the code below:
ValueError: setting an array element with a sequence.
Code:
# -*- coding: utf-8 -*-
"""----------------------------------------------------------------------------
-------------------------------- Imports --------------------------------------
----------------------------------------------------------------------------"""
import os
import pandas as pd
import numpy as np
from scipy import io as sio
from scipy import signal
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
"""----------------------------------------------------------------------------
------------------------------ Parameters -------------------------------------
----------------------------------------------------------------------------"""
# Path to the clean EEG .mat files
EEG_path = "data"
# Listing of the .mat files
EEG = list()
for elt in os.listdir(EEG_path):
    if os.path.isfile(os.path.join(EEG_path, elt)):
        if '.mat' in elt[len(elt)-4:]:
            EEG.append(elt)
# Spectrum used
spectrum = ['all', (1,45), (8,12)]
nb_features = 3
"""----------------------------------------------------------------------------
------------------------------ Functions --------------------------------------
----------------------------------------------------------------------------"""
# Functions on one channel
# Input: all points from one channel, for one epoch
def filter(x, n, fs, fc1, fc2):
    b, a = signal.butter(n, [fc1/(fs/2), fc2/(fs/2)], 'bandpass')
    y = signal.filtfilt(b, a, x)
    return y
def haming(x, L):
    # Symmetric L-point Hamming window
    window = signal.hamming(L)
    y = x * window.T  # element-wise multiplication
    return y
# Functions on one epoch
# Input is a matrix of size (channel * length)
def amp_mean(x):
    size = x.shape
    y = list()
    for i in range(size[0]):
        y.append(np.mean(x[i,:]))
    return y
def amp_max(x):
    size = x.shape
    y = list()
    for i in range(size[0]):
        y.append(np.max(abs(x[i,:])))
    return y
"""----------------------------------------------------------------------------
-------------------------------- Script ---------------------------------------
----------------------------------------------------------------------------"""
# Load data
s_EEG = "{}/{}".format(EEG_path, EEG[4])
data = sio.loadmat(s_EEG)['s_EEG']['data'][0][0].astype(float) # data[i, j ,k]
labels = sio.loadmat(s_EEG)['s_EEG']['labels'][0][0][0] # labels[k]
fs = sio.loadmat(s_EEG)['s_EEG']['sampling_rate'][0][0][0][0] # 500 Hz
size = data.shape
# Creates an empty data frame of size (epoch * features)
df = np.empty(shape = (size[2], nb_features * len(spectrum)))
# Filling the dataframe with features
# for every epoch
for k in range(size[2]):
    for freq in spectrum:
        data_to_compute = np.empty(shape = size, dtype = float)
        # Apply Hamming window
        if freq == 'all':
            for i in range(size[0]):
                data_to_compute[i,:,k] = haming(data[i,:,k], size[1])
        # Apply Hamming window after filtering
        else:
            for i in range(size[0]):
                data_to_compute[i,:,k] = haming(filter(data[i,:,k],
                                                       15, fs, freq[0], freq[1]), size[1])
        # data_to_compute is ready to have features extracted
        for n in range(0, df.shape[1], nb_features):
            df[k, n] = data_to_compute[:,:,k]
            df[k, n+1] = amp_mean(data_to_compute[:,:,k])
            df[k, n+2] = amp_max(data_to_compute[:,:,k])
# X signal / Y label
X_train, X_test, Y_train, Y_test = train_test_split(data,
                                                    list(labels),
                                                    test_size=0.15,
                                                    random_state=42)
clf = SVC()
clf.fit(X_train, Y_train)
Thanks!
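For what it's worth, the ValueError above comes from assigning a whole 2-D array into a single cell of df: sklearn's SVC expects a 2-D numeric matrix of shape (n_samples, n_features), so one common workaround is to flatten each feature and concatenate them into a single 1-D vector per sample. A minimal sketch reusing the helpers above (the layout is my assumption, not the poster's code):
# hypothetical per-epoch feature vector: flatten everything to 1-D, then concatenate
epoch = data_to_compute[:, :, k]
row = np.concatenate([
    epoch.ravel(),                             # 2-D array flattened
    np.asarray(amp_mean(epoch), dtype=float),  # list of per-channel means
    np.asarray(amp_max(epoch), dtype=float),   # list of per-channel maxima
])
# stacking one such row per epoch yields the (n_samples, n_features) matrix SVC needs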

Why are CIFAR-10 images not displayed properly using matplotlib?

From the training set I took an image ('img') of size (3,32,32).
I have used plt.imshow(img.T), but the image is not clear. What changes do I have to make to 'img' so it is more clearly visible?
Thanks.
The following prints a 5x5 grid of random CIFAR10 images. It isn't blurry, though not perfect either. Any suggestions welcome.
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from six.moves import cPickle
f = open('data/cifar10/cifar-10-batches-py/data_batch_1', 'rb')
datadict = cPickle.load(f,encoding='latin1')
f.close()
X = datadict["data"]
Y = datadict['labels']
X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("uint8")
Y = np.array(Y)
#Visualizing CIFAR 10
fig, axes1 = plt.subplots(5,5,figsize=(3,3))
for j in range(5):
    for k in range(5):
        i = np.random.choice(range(len(X)))
        axes1[j][k].set_axis_off()
        axes1[j][k].imshow(X[i:i+1][0])
Make sure you don't normalize your dataset when you want to display the image.
Example :
The loader...
import torch
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10('../data', train=True, download=True,
                     transform=transforms.Compose([
                         transforms.RandomHorizontalFlip(),
                         transforms.ToTensor(),
                         # transforms.Normalize(
                         #     (0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))
                     ])),
    batch_size=64, shuffle=True)
The code that shows the image...
img = next(iter(train_loader))[0][0]
plt.imshow(transforms.ToPILImage()(img))
Normalized:
Without normalization:
This file reads the cifar10 dataset and plots individual images using matplotlib.
import _pickle as pickle
import argparse
import numpy as np
import os
import matplotlib.pyplot as plt
cifar10 = "./cifar-10-batches-py/"
parser = argparse.ArgumentParser("Plot training images in cifar10 dataset")
parser.add_argument("-i", "--image", type=int, default=0,
                    help="Index of the image in cifar10. In range [0, 49999]")
args = parser.parse_args()
def unpickle(file):
    with open(file, 'rb') as fo:
        data = pickle.load(fo, encoding='bytes')
    return data
def cifar10_plot(data, meta, im_idx=0):
    im = data[b'data'][im_idx, :]
    im_r = im[0:1024].reshape(32, 32)
    im_g = im[1024:2048].reshape(32, 32)
    im_b = im[2048:].reshape(32, 32)
    img = np.dstack((im_r, im_g, im_b))
    print("shape: ", img.shape)
    print("label: ", data[b'labels'][im_idx])
    print("category:", meta[b'label_names'][data[b'labels'][im_idx]])
    plt.imshow(img)
    plt.show()
def main():
    batch = (args.image // 10000) + 1
    idx = args.image - (batch-1)*10000
    data = unpickle(os.path.join(cifar10, "data_batch_" + str(batch)))
    meta = unpickle(os.path.join(cifar10, "batches.meta"))
    cifar10_plot(data, meta, im_idx=idx)
if __name__ == "__main__":
    main()
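Assuming the script is saved as plot_cifar.py (a hypothetical name) in the directory that contains cifar-10-batches-py, it can be run, for example, as:
python plot_cifar.py --image 42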
The image is blurry due to interpolation. To prevent blurring in matplotlib, call imshow with keyword interpolation='nearest':
plt.imshow(img.T, interpolation='nearest')
Also, it appears that your x and y axes are being swapped when you use the transpose, so you may want to display it like this instead:
plt.imshow(np.transpose(img, (1, 2, 0)), interpolation='nearest')
I have used the following code to show all CIFAR data as one big image. The code shows the image, but if you want to save it without blurring, I suggest using plt.savefig(fname, format='png', dpi=1000)
import numpy as np
import matplotlib.pyplot as plt
def reshape_and_print(cifar_data):
    # number of images in rows and columns
    rows = cols = np.sqrt(cifar_data.shape[0]).astype(np.int32)
    # image height and width; divide by 3 because of the 3 color channels
    imh = imw = np.sqrt(cifar_data.shape[1] // 3).astype(np.int32)
    # reshape to number of images X color channels X image size
    # transpose to color channels X number of images X image size
    timg = cifar_data.reshape(rows * cols, 3, imh * imw).transpose(1, 0, 2)
    # reshape to color channels X rows X cols X image height X image width
    # swap axes to color channels X rows X image height X cols X image width
    timg = timg.reshape(3, rows, cols, imh, imw).swapaxes(2, 3)
    # reshape to color channels X combined image height X combined image width
    # transpose to combined image height X combined image width X color channels
    timg = timg.reshape(3, rows * imh, cols * imw).transpose(1, 2, 0)
    plt.imshow(timg)
    plt.show()
I made a quick data helper class that I used for a small test project; I hope it can be useful:
import gzip
import pickle
import numpy as np
import matplotlib.pyplot as plt
class DataSet(object):
    def __init__(self, seed=42, setsize=10000):
        self.seed = seed
        # set the seed for reproducibility
        np.random.seed(seed)
        # load the data
        train_set, test_set = self.load_data()
        # self.split_data(train_set, valid_set, test_set)
        self.split_data(train_set, test_set, setsize)
    def split_data(self, data_set, test_set, split_size):
        permutation = np.random.permutation(data_set.shape[0])
        self.train = data_set[permutation[:split_size]]
        self.valid = data_set[permutation[split_size:split_size * 2]]
        self.test = test_set[:split_size]
    def reshape_for_print(self, data):
        raise NotImplementedError
    def load_data(self):
        raise NotImplementedError
    def show_all_imgs(self, data):
        raise NotImplementedError
class CIFAR(DataSet):
    def load_data(self):
        # try to load data
        with open('./data/cifar-100-python/train', 'rb') as f:
            data = pickle.load(f, encoding='latin1')
        train_set = data['data'].astype(np.float32) / 255.0
        with open('./data/cifar-100-python/test', 'rb') as f:
            data = pickle.load(f, encoding='latin1')
        test_set = data['data'].astype(np.float32) / 255.0
        return train_set, test_set
    def reshape_for_print(self, data):
        gh = gw = np.sqrt(data.shape[0]).astype(np.int32)
        imh = imw = np.sqrt(data.shape[1] // 3).astype(np.int32)
        timg = data.reshape(gh * gw, 3, imh * imw).transpose(1, 0, 2)
        timg = timg.reshape(3, gh, gw, imh, imw).swapaxes(2, 3)
        timg = timg.reshape(3, gh * imh, gw * imw).transpose(1, 2, 0)
        return timg
    def show_all_imgs(self, data):
        timg = self.reshape_for_print(data)
        plt.imshow(timg)
        plt.show()
class MNIST(DataSet):
    def load_data(self):
        # try to load data
        with gzip.open('./data/mnist.pkl.gz', 'rb') as f:
            train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
        return train_set[0], test_set[0]
    def reshape_for_print(self, data):
        gh = gw = np.sqrt(data.shape[0]).astype(np.int32)
        imh = imw = np.sqrt(data.shape[1]).astype(np.int32)
        timg = data.reshape(gh, gw, imh, imw).swapaxes(1, 2)
        timg = timg.reshape(gh * imh, gw * imw)
        return timg
    def show_all_imgs(self, data):
        timg = self.reshape_for_print(data)
        plt.imshow(timg, cmap=plt.cm.gray)
        plt.show()
I made a function to plot the RGB image from a row in the CIFAR10 dataset. The image will be blurry at best, since the original size of the image is very small (32px x 32px).
# imports needed to run this snippet on its own
import pickle
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def unpickle(file):
    with open(file, 'rb') as fo:
        dict1 = pickle.load(fo, encoding='bytes')
    return dict1
pd_tr = pd.DataFrame()
tr_y = pd.DataFrame()
for i in range(1,6):
    data = unpickle('data/data_batch_' + str(i))
    pd_tr = pd_tr.append(pd.DataFrame(data[b'data']))
    tr_y = tr_y.append(pd.DataFrame(data[b'labels']))
pd_tr['labels'] = tr_y
tr_x = np.asarray(pd_tr.iloc[:, :3072])
tr_y = np.asarray(pd_tr['labels'])
ts_x = np.asarray(unpickle('data/test_batch')[b'data'])
ts_y = np.asarray(unpickle('data/test_batch')[b'labels'])
labels = unpickle('data/batches.meta')[b'label_names']
def plot_CIFAR(ind):
    arr = tr_x[ind]
    sc_dpi = 157.35
    R = arr[0:1024].reshape(32,32)/255.0
    G = arr[1024:2048].reshape(32,32)/255.0
    B = arr[2048:].reshape(32,32)/255.0
    img = np.dstack((R,G,B))
    title = re.sub('[!##$b]', '', str(labels[tr_y[ind]]))
    fig = plt.figure(figsize=(3,3))
    ax = fig.add_subplot(111)
    ax.imshow(img, interpolation='bicubic')
    ax.set_title('Category = ' + title, fontsize=15)
plot_CIFAR(4)
Try using:
import matplotlib.pyplot as plt
from scipy.misc import toimage
plt.imshow(toimage(img))
I am not 100% sure how the code works, but I think that because the images are stored as floating-point numpy arrays, the imshow() function has a difficult time mapping them to the right colors. By typecasting them with toimage() you convert them into the proper image format that imshow() expects, i.e. not a raw array but a PIL image.
This code works for me every time I want to display images in Python.
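Note that scipy.misc.toimage was deprecated and later removed from SciPy, so on recent versions this snippet fails. A rough Pillow-based equivalent (assuming img is a float array scaled to [0, 1]):
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
# scale to 8-bit and wrap as a PIL image before display
plt.imshow(Image.fromarray((np.asarray(img) * 255).astype(np.uint8)))
plt.show()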
Try the code below. I found a very useful link about visualization of MNIST and CIFAR images; you can find code for various datasets there:
https://machinelearningmastery.com/how-to-load-and-visualize-standard-computer-vision-datasets-with-keras/
The cifar10 example below works well.
# example of loading the cifar10 dataset
from matplotlib import pyplot
from keras.datasets import cifar10
# load dataset
(trainX, trainy), (testX, testy) = cifar10.load_data()
# summarize loaded dataset
print('Train: X=%s, y=%s' % (trainX.shape, trainy.shape))
print('Test: X=%s, y=%s' % (testX.shape, testy.shape))
# plot first few images
for i in range(9):
    # define subplot
    pyplot.subplot(330 + 1 + i)
    # plot raw pixel data
    pyplot.imshow(trainX[i])
# show the figure
pyplot.show()
Add 0.5 (if the images were normalized into the range [-0.5, 0.5], this shifts them back into the [0, 1] range that imshow expects for float input):
plt.imshow(np.transpose(img, (1, 2, 0)) + 0.5)
