I'm having trouble understanding what this bit of code means. Why is y.values compared to a tuple with two values, when the shape of the array is a single row (650,)?
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
df = pd.read_csv('readonly/mushrooms.csv')
df2 = pd.get_dummies(df)
df3 = df2.sample(frac=0.08)
X = df3.iloc[:,2:]
y = df3.iloc[:,1]
pca = PCA(n_components=2).fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(pca, y, random_state=0)
plt.figure(dpi=120)
plt.scatter(pca[y.values==0,0], pca[y.values==0,1], alpha=0.5, label='Edible', s=2) # < --- why is y.values compared to two values instead of one???
plt.scatter(pca[y.values==1,0], pca[y.values==1,1], alpha=0.5, label='Poisonous', s=2)
plt.legend()
plt.title('Mushroom Data Set\nFirst Two Principal Components')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.gca().set_aspect('equal')
y.values is not being compared to a tuple. In pca[y.values==0, 0] there are two separate indices: the boolean mask y.values==0 selects the rows that satisfy the condition, and the 0 after the comma selects the first column.
Please see the fit_transform documentation: it returns a 2-dimensional array, which is why two indices are needed even though y.values itself is a single 1-dimensional array.
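As a small illustration (a sketch with made-up data, not the mushroom set), boolean-mask indexing on a 2-dimensional array works like this:
import numpy as np
# toy stand-in for the PCA output: 4 rows, 2 columns
pca_toy = np.array([[0.1, 0.2],
                    [0.3, 0.4],
                    [0.5, 0.6],
                    [0.7, 0.8]])
labels = np.array([0, 1, 0, 1])  # toy stand-in for y.values
print(pca_toy[labels == 0, 0])   # rows where labels == 0, first column  -> [0.1 0.5]
print(pca_toy[labels == 0, 1])   # rows where labels == 0, second column -> [0.2 0.6]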
I used the ellipticenvelope method to find the anomalies in the iris dataset as below:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
iris = load_iris()
cols = iris.feature_names
X = pd.DataFrame(iris.data, columns=cols)
X.head()
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EllipticEnvelope
scaler = StandardScaler()
scaler.fit_transform(X)
cov = EllipticEnvelope(store_precision=True,
                       assume_centered=True,
                       support_fraction=None,
                       contamination=0.01,
                       random_state=0)
cov.fit(X)
X['Anomaly'] = cov.predict(X)
Now you can find the anomalies in the last column with the value -1.
X[X['Anomaly'] == -1]
Now I want to do a root-cause analysis to find the source of each anomaly, so I want to plot the anomalies on the boxplot as red dots, for example. Is this possible? If so, how can I add it?
X.boxplot(column=cols, grid=False, rot=45)
# code to plot anomalies on boxplot
plt.show()
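One possible approach (a sketch, not part of the original post): pandas draws the boxes at integer x positions 1 through len(cols), so the rows flagged with -1 can be overlaid on the same Axes with ax.scatter. This assumes X, cols, and the 'Anomaly' column from the code above.
anomalies = X[X['Anomaly'] == -1]
ax = X.boxplot(column=cols, grid=False, rot=45)  # DataFrame.boxplot returns the Axes it drew on
for pos, col in enumerate(cols, start=1):
    # one red dot per anomalous row, at the x position of that column's box
    ax.scatter([pos] * len(anomalies), anomalies[col], color='red', zorder=3)
plt.show()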
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
result = permutation_importance(rf,
                                X_test,
                                y_test,
                                n_repeats=10,
                                random_state=42,
                                n_jobs=2)
sorted_idx = result.importances_mean.argsort()
fig, ax = plt.subplots()
ax.boxplot(result.importances[sorted_idx].T,
           vert=False,
           labels=X_test.columns[sorted_idx])
ax.set_title("Permutation Importances (test set)")
fig.tight_layout()
plt.show()
In the code above, taken from this example in the documentation, is there a way to plot the top 3 features only instead of all the features?
argsort "returns the indices that would sort an array," so here sorted_idx contains the feature indices in order of least to most important. Since you just want the 3 most important features, take only the last 3 indices:
sorted_idx = result.importances_mean.argsort()[-3:]
# array([4, 0, 1])
Then the plotting code can remain as is, but now it will only plot the top 3 features:
# unchanged
fig, ax = plt.subplots(figsize=(6, 3))
ax.boxplot(result.importances[sorted_idx].T,
           vert=False, labels=X_test.columns[sorted_idx])
ax.set_title("Permutation Importances (test set)")
fig.tight_layout()
plt.show()
Note that if you prefer to leave sorted_idx untouched (e.g., to use the full indices elsewhere in the code),
either change sorted_idx to sorted_idx[-3:] inline:
sorted_idx = result.importances_mean.argsort() # unchanged
ax.boxplot(result.importances[sorted_idx[-3:]].T,               # replace sorted_idx with sorted_idx[-3:]
           vert=False, labels=X_test.columns[sorted_idx[-3:]])  # replace sorted_idx with sorted_idx[-3:]
or store the filtered indices in a separate variable:
sorted_idx = result.importances_mean.argsort() # unchanged
top3_idx = sorted_idx[-3:] # store top 3 indices
ax.boxplot(result.importances[top3_idx].T,               # replace sorted_idx with top3_idx
           vert=False, labels=X_test.columns[top3_idx])  # replace sorted_idx with top3_idx
I ran this code for polynomial regression using sklearn, but my plot is not what I was expecting. As you can see here, I'm not getting a smooth line; it jumps from one point to another. From my understanding I have to sort X, but when I do that all I get is an empty plot with a linear line.
import operator
import numpy as np
from sklearn.cluster import KMeans
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import mean_squared_error, r2_score
import statsmodels.formula.api as smf
df = pd.read_csv('D:\Mall_Customers.csv', usecols = ['Age', 'Annual Income (k$)','Spending Score (1-100)'])
x = StandardScaler().fit_transform(df)
kmeans = KMeans(n_clusters=3, max_iter=100)
y_kmeans= kmeans.fit_predict(x)
mydict = {i: np.where(kmeans.labels_ == i)[0] for i in range(kmeans.n_clusters)}
dictlist = []
for key, value in mydict.items():
    temp = [key, value]
    dictlist.append(temp)
df0 = df[df.index.isin(mydict[0].tolist())]
X = df0[['Age', 'Annual Income (k$)']]
Y = df0['Spending Score (1-100)']
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X)
model = LinearRegression()
model.fit(X_poly, Y)
y_poly_pred = model.predict(X_poly)
r2 = r2_score(Y,y_poly_pred)
print(r2)
model = make_pipeline(PolynomialFeatures(degree=2), LinearRegression(fit_intercept = False))
model.fit(X,Y)
plt.scatter(X.iloc[:, 1], Y, color='red')
plt.plot(X, Y, color='blue')
plt.xlabel('Age. Annual income')
plt.ylabel('Spending Score')
plt.show()
TL;DR: the data is not linearly dependent.
The graph gets so messy because you plotted X (the training data) against Y (the actual target values), and the data itself is noisy and not really linearly dependent, so connecting the raw points produces a tangle of lines.
I suggest that you:
split the data into train and test sets, train the model, check the error on the test set, and perhaps create two plots: one with the model's predictions for the test data and one with the actual values for the test data;
and change the plotting code to something like this (y_poly_pred is the prediction already computed above):
plt.scatter(X.iloc[:, 1], Y)
plt.plot(X.iloc[:, 1], y_poly_pred, color='red')
plt.show()
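Since the question also mentions sorting X: here is a minimal sketch (assuming X, Y, and y_poly_pred from the question's code) that sorts by the feature on the x-axis so the predicted values are drawn from left to right instead of jumping back and forth:
order = X['Annual Income (k$)'].values.argsort()   # indices that sort the x-axis feature
x_axis = X['Annual Income (k$)'].values[order]
plt.scatter(x_axis, Y.values[order], color='red', label='actual')
plt.plot(x_axis, y_poly_pred[order], color='blue', label='predicted')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score')
plt.legend()
plt.show()
Note that because the model uses two features (Age and Annual Income), the predictions are not a function of income alone, so the curve can still wobble; for a perfectly smooth curve you would fit on a single feature or predict over a regular grid.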
I have a data set to which I've been assigned to apply PCA, retain one component, and then visualize the distribution in a scatter plot that indicates the class of each data point.
For context: the data we're working with has three columns. X is columns 1 and 2, and y is column 3, which contains the class of each data point.
It was implied that the resulting visualization should be a horizontal line, but I'm not seeing that. The resulting visualization is a scatter plot that looks like a positive linear distribution.
import pandas as pd
import sklearn
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
df = pd.read_csv("data.csv", header=None)
X = df.iloc[:, 0:2].values
y = df.iloc[:,-1].values
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.3,random_state=np.random)
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
pcaObj1 = PCA(n_components=1)
X_train_PCA = pcaObj1.fit_transform(X_train)
X_test_PCA = pcaObj1.transform(X_test)
X_set, y_set = X_test_PCA, y_test
X3 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01))
X3 = np.array(X3)
plt.xlim(X3.min(), X3.max())
plt.ylim(X3.min(), X3.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 0],
                c = ListedColormap(('purple', 'yellow'))(i), label = j)
I see that you have a test set in addition to a training set; however, this is not the usual setup for PCA. PCA has multiple applications, but one of the main ones is dimensionality reduction. Dimensionality reduction is about removing variables, and PCA serves this purpose by changing the basis of your data and ordering the new variables by the amount (or relative amount) of the total variation that they linearly explain. Since this does not require test data, we can think of it as unsupervised machine learning, although many would prefer to call it feature engineering, as it is often used to preprocess data to improve the performance of models trained on that preprocessed data.
Let me generate a random dataset with 10 variables and 1000 entries for the sake of example. Fitting the PCA transform for 1 component, you're selecting a new variable (feature) that is a linear combination of the original variables that attempts to linearly explain the most variance in the data. As you say, it is a number line; just as a quick-and-easy plot let's just use the x-axis as the index of the new variable array and the y-axis as the value of the variable.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
X_train = np.random.random((1000, 10))
y_labels = np.array([0] * 500 + [1] * 500)
pcaObj1 = PCA(n_components=1)
X_PCA = pcaObj1.fit_transform(X_train)
plt.scatter(range(len(y_labels)), X_PCA, c=['red' if i==0 else 'green' for i in y_labels])
plt.show()
You can see this produces a 1000 x 1 array representing your new variable.
>>> X_PCA.shape
(1000, 1)
If you had selected n_components=2 instead, you'd have a 1000 x 2 array with two such variables. Let's see that as example. This time I'll plot the two principal components against each other instead of using a single principal component against its index.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
X_train = np.random.random((1000, 10))
y_labels = np.array([0] * 500 + [1] * 500)
pcaObj1 = PCA(n_components=2)
X_PCA = pcaObj1.fit_transform(X_train)
plt.scatter(X_PCA[:,0], X_PCA[:,1], c=['red' if i==0 else 'green' for i in y_labels])
plt.show()
Now, my randomly generated data may not have the same properties as your data set. If you really expect the output to be a line, then I'd say certainly not, as my example generates a very erratic trace. You'll see even in the 2D case that the data doesn't seem structured by class, but that's what you would expect from random data.
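If the goal is literally a horizontal line of points (the single component shown on a number line), one option (a sketch of my own, reusing X_PCA and y_labels from the example above) is to plot the component against a constant y value:
# x = value of the single principal component, y = 0 for every point, colored by class
plt.scatter(X_PCA[:, 0], np.zeros(len(X_PCA)),
            c=['red' if i == 0 else 'green' for i in y_labels], alpha=0.5)
plt.yticks([])
plt.xlabel('PC1')
plt.show()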
This example should give some clarity. Make sure you read all the comments so you can follow what's going on.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import urllib.request
import random
# seaborn is a layer on top of matplotlib which has additional visualizations -
# just importing it changes the look of the standard matplotlib plots.
# the current version also shows some warnings which we'll disable.
import seaborn as sns
sns.set(style="white", color_codes=True)
import warnings
warnings.filterwarnings("ignore")
from sklearn import datasets
iris = datasets.load_iris()
# put the four features in a dataframe so we can use .sample() and refer to columns by name
X = pd.DataFrame(iris.data, columns=['sepal_length', 'sepal_width', 'petal_length', 'petal_width'])
y = pd.Series(iris.target)
# keep a copy with the species name attached - it's used for the plots below
data = X.copy()
data['species'] = pd.Categorical.from_codes(iris.target, iris.target_names)
print(X.sample(5))
print(y.sample(5))
# see how many samples we have of each species
data["species"].value_counts()
from sklearn import preprocessing
scaler = preprocessing.StandardScaler()
scaler.fit(X)
X_scaled_array = scaler.transform(X)
X_scaled = pd.DataFrame(X_scaled_array, columns = X.columns)
X_scaled.sample(5)
# try clustering on the 4d data and see if can reproduce the actual clusters.
# ie imagine we don't have the species labels on this data and wanted to
# divide the flowers into species. could set an arbitrary number of clusters
# and try dividing them up into similar clusters.
# we happen to know there are 3 species, so let's find 3 species and see
# if the predictions for each point matches the label in y.
from sklearn.cluster import KMeans
nclusters = 3 # this is the k in kmeans
seed = 0
km = KMeans(n_clusters=nclusters, random_state=seed)
km.fit(X_scaled)
# predict the cluster for each data point
y_cluster_kmeans = km.predict(X_scaled)
y_cluster_kmeans
# use seaborn to make scatter plot showing species for each sample
sns.FacetGrid(data, hue="species", size=4) \
.map(plt.scatter, "sepal_length", "sepal_width") \
.add_legend();
# do same for petals
sns.FacetGrid(data, hue="species", size=4) \
.map(plt.scatter, "petal_width", "sepal_width") \
.add_legend();
# if you have a lot of features it can be helpful to do some feature reduction
# to avoid the curse of dimensionality (i.e. needing exponentially more data
# to do accurate predictions as the number of features grows).
# you can do this with Principal Component Analysis (PCA), which remaps the data
# to a new (smaller) coordinate system which tries to account for the
# most information possible.
# you can *also* use PCA to visualize the data by reducing the
# features to 2 dimensions and making a scatterplot.
# it kind of mashes the data down into 2d, so can lose
# information - but in this case it's just going from 4d to 2d,
# so not losing too much info.
# so let's just use it to visualize the data...
# mash the data down into 2 dimensions
from sklearn.decomposition import PCA
ndimensions = 2
seed = 10
pca = PCA(n_components=ndimensions, random_state=seed)
pca.fit(X_scaled)
X_pca_array = pca.transform(X_scaled)
X_pca = pd.DataFrame(X_pca_array, columns=['PC1','PC2']) # PC=principal component
X_pca.sample(5)
# so that gives us new 2d coordinates for each data point.
# at this point, if you don't have labelled data,
# you can add the k-means cluster ids to this table and make a
# colored scatterplot.
# we do actually have labels for the data points, but let's imagine
# we don't, and use the predicted labels to see what the predictions look like.
# first, convert species to an arbitrary number
y_id_array = pd.Categorical(data['species']).codes
df_plot = X_pca.copy()
df_plot['ClusterKmeans'] = y_cluster_kmeans
df_plot['SpeciesId'] = y_id_array # also add actual labels so we can use it in later plots
df_plot.sample(5)
# so now we can make a 2d scatterplot of the clusters
# first define a plot fn
def plotData(df, groupby):
    "make a scatterplot of the first two principal components of the data, colored by the groupby field"
    # make a figure with just one subplot.
    # you can specify multiple subplots in a figure,
    # in which case ax would be an array of axes,
    # but in this case it'll just be a single axis object.
    fig, ax = plt.subplots(figsize = (7,7))
    # color map
    cmap = mpl.cm.get_cmap('prism')
    # we can use pandas to plot each cluster on the same graph.
    # see http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.plot.html
    for i, cluster in df.groupby(groupby):
        cluster.plot(ax = ax, # need to pass this so all scatterplots are on same graph
                     kind = 'scatter',
                     x = 'PC1', y = 'PC2',
                     color = cmap(i/(nclusters-1)), # cmap maps a number to a color
                     label = "%s %i" % (groupby, i),
                     s=30) # dot size
    ax.grid()
    ax.axhline(0, color='black')
    ax.axvline(0, color='black')
    ax.set_title("Principal Components Analysis (PCA) of Iris Dataset");
# plot the clusters each datapoint was assigned to
plotData(df_plot, 'ClusterKmeans')
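As a follow-up (a small addition of my own, not in the original snippet), the SpeciesId column added above can be passed to the same function to compare the k-means clusters against the actual species labels:
# plot the same 2d points again, this time colored by the true species
plotData(df_plot, 'SpeciesId')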
I've been stuck on this issue for two days now. I have some data points that I put in a scatter plot and get this:
Which is nice, but now I also want to add a regression line, so I had a look at this example from sklearn and changed the code to this:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
degrees = [3, 4, 5]
X = combined[['WPI score']]
y = combined[['CPI score']]
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
    ax = plt.subplot(1, len(degrees), i + 1)
    plt.setp(ax, xticks=(), yticks=())
    polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False)
    linear_regression = LinearRegression()
    pipeline = Pipeline([("polynomial_features", polynomial_features), ("linear_regression", linear_regression)])
    pipeline.fit(X, y)
    # Evaluate the models using crossvalidation
    scores = cross_val_score(pipeline, X, y, scoring="neg_mean_squared_error", cv=10)
    X_test = X #np.linspace(0, 1, len(combined))
    plt.plot(X, pipeline.predict(X_test), label="Model")
    plt.scatter(X, y, label="CPI-WPI")
    plt.xlabel("X")
    plt.ylabel("y")
    plt.legend(loc="best")
    plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(degrees[i], -scores.mean(), scores.std()))
plt.savefig(pic_path + 'multi.png', bbox_inches='tight')
plt.show()
which has the following output:
Note that X and y are both DataFrames of size (151, 1). I can post the content of X and y too, if necessary.
What I want is a nice smooth line, but I seem not to be able to figure out, how to do this.
[Edit]
The question here is: How do I get a single smooth, curvy polynomial line instead of multiple ones with seemingly random pattern.
[Edit 2]
The problem is, when I use the linspace like this:
X_test = np.linspace(1, 4, 151)
X_test = X_test[:, np.newaxis]
I get an even more random pattern:
The trick was to set up the code as follows:
X_test = np.linspace(min(X['WPI score']), max(X['WPI score']), X.shape[0])
X_test = X_test[:, np.newaxis]
plt.plot(X_test, pipeline.predict(X_test), label="Model")
Which yields the following result (a much nicer single smooth line).