I'm trying to draw a complete-link scipy.cluster.hierarchy.dendrogram, and I found that scipy.cluster.hierarchy.linkage is slower than sklearn.AgglomerativeClustering.
However, sklearn.AgglomerativeClustering doesn't return the distances between clusters or the number of original observations, which scipy.cluster.hierarchy.dendrogram needs. Is there a way to obtain them?
It's possible, but it isn't pretty. It requires (at a minimum) a small rewrite of AgglomerativeClustering.fit (source). The difficulty is that the method requires a number of imports, so it ends up getting a bit nasty looking. To add in this feature:
Insert the following line after line 748:
kwargs['return_distance'] = True
Replace line 752 with:
self.children_, self.n_components_, self.n_leaves_, parents, self.distance = \
This will give you a new attribute, distance, that you can easily call.
A couple things to note:
When doing this, I ran into this issue about the check_array function on line 711. This can be fixed by using check_arrays (from sklearn.utils.validation import check_arrays). You can modify that line to become X = check_arrays(X)[0]. This appears to be a bug (I still have this issue on the most recent version of scikit-learn).
Depending on which version of sklearn.cluster.hierarchical.linkage_tree you have, you may also need to modify it to be the one provided in the source.
To make things easier for everyone, here is the full code that you will need to use:
from heapq import heapify, heappop, heappush, heappushpop
import warnings
import sys
import numpy as np
from scipy import sparse
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.externals.joblib import Memory
from sklearn.externals import six
from sklearn.utils.validation import check_arrays
from sklearn.utils.sparsetools import connected_components
from sklearn.cluster import _hierarchical
from sklearn.cluster.hierarchical import ward_tree
from sklearn.cluster._feature_agglomeration import AgglomerationTransform
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.metrics.pairwise import paired_distances, pairwise_distances
def _fix_connectivity(X, connectivity, n_components=None,
affinity="euclidean"):
"""
Fixes the connectivity matrix
- copies it
- makes it symmetric
- converts it to LIL if necessary
- completes it if necessary
"""
n_samples = X.shape[0]
if (connectivity.shape[0] != n_samples or
connectivity.shape[1] != n_samples):
raise ValueError('Wrong shape for connectivity matrix: %s '
'when X is %s' % (connectivity.shape, X.shape))
# Make the connectivity matrix symmetric:
connectivity = connectivity + connectivity.T
# Convert connectivity matrix to LIL
if not sparse.isspmatrix_lil(connectivity):
if not sparse.isspmatrix(connectivity):
connectivity = sparse.lil_matrix(connectivity)
else:
connectivity = connectivity.tolil()
# Compute the number of nodes
n_components, labels = connected_components(connectivity)
if n_components > 1:
warnings.warn("the number of connected components of the "
"connectivity matrix is %d > 1. Completing it to avoid "
"stopping the tree early." % n_components,
stacklevel=2)
# XXX: Can we do without completing the matrix?
for i in xrange(n_components):
idx_i = np.where(labels == i)[0]
Xi = X[idx_i]
for j in xrange(i):
idx_j = np.where(labels == j)[0]
Xj = X[idx_j]
D = pairwise_distances(Xi, Xj, metric=affinity)
ii, jj = np.where(D == np.min(D))
ii = ii[0]
jj = jj[0]
connectivity[idx_i[ii], idx_j[jj]] = True
connectivity[idx_j[jj], idx_i[ii]] = True
return connectivity, n_components
# average and complete linkage
def linkage_tree(X, connectivity=None, n_components=None,
n_clusters=None, linkage='complete', affinity="euclidean",
return_distance=False):
"""Linkage agglomerative clustering based on a Feature matrix.
The inertia matrix uses a Heapq-based representation.
This is the structured version, that takes into account some topological
structure between samples.
Parameters
----------
X : array, shape (n_samples, n_features)
feature matrix representing n_samples samples to be clustered
connectivity : sparse matrix (optional).
connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is None, i.e, the Ward algorithm is unstructured.
n_components : int (optional)
Number of connected components. If None the number of connected
components is estimated from the connectivity matrix.
NOTE: This parameter is now directly determined
from the connectivity matrix and will be removed in 0.18
n_clusters : int (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. In this case, the
complete tree is not computed, thus the 'children' output is of
limited use, and the 'parents' output should rather be used.
This option is valid only when specifying a connectivity matrix.
linkage : {"average", "complete"}, optional, default: "complete"
Which linkage criteria to use. The linkage criterion determines which
distance to use between sets of observations.
- average uses the average of the distances of each observation of
the two sets
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
affinity : string or callable, optional, default: "euclidean".
which metric to use. Can be "euclidean", "manhattan", or any
distance known to paired distances (see metrics.pairwise)
return_distance : bool, default False
whether or not to return the distances between the clusters.
Returns
-------
children : 2D array, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
n_components : int
The number of connected components in the graph.
n_leaves : int
The number of leaves in the tree.
parents : 1D array, shape (n_nodes, ) or None
The parent of each node. Only returned when a connectivity matrix
is specified, elsewhere 'None' is returned.
distances : ndarray, shape (n_nodes-1,)
Returned when return_distance is set to True.
distances[i] refers to the distance between children[i][0] and
children[i][1] when they are merged.
See also
--------
ward_tree : hierarchical clustering with ward linkage
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (-1, 1))
n_samples, n_features = X.shape
linkage_choices = {'complete': _hierarchical.max_merge,
'average': _hierarchical.average_merge,
}
try:
join_func = linkage_choices[linkage]
except KeyError:
raise ValueError(
'Unknown linkage option, linkage should be one '
'of %s, but %s was given' % (linkage_choices.keys(), linkage))
if connectivity is None:
from scipy.cluster import hierarchy # imports PIL
if n_clusters is not None:
warnings.warn('Partial build of the tree is implemented '
'only for structured clustering (i.e. with '
'explicit connectivity). The algorithm '
'will build the full tree and only '
'retain the lower branches required '
'for the specified number of clusters',
stacklevel=2)
if affinity == 'precomputed':
# for the linkage function of hierarchy to work on precomputed
# data, provide as first argument an ndarray of the shape returned
# by pdist: it is a flat array containing the upper triangular of
# the distance matrix.
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
elif affinity == 'l2':
# Translate to something understood by scipy
affinity = 'euclidean'
elif affinity in ('l1', 'manhattan'):
affinity = 'cityblock'
elif callable(affinity):
X = affinity(X)
i, j = np.triu_indices(X.shape[0], k=1)
X = X[i, j]
out = hierarchy.linkage(X, method=linkage, metric=affinity)
children_ = out[:, :2].astype(np.int)
if return_distance:
distances = out[:, 2]
return children_, 1, n_samples, None, distances
return children_, 1, n_samples, None
if n_components is not None:
warnings.warn(
"n_components is now directly calculated from the connectivity "
"matrix and will be removed in 0.18",
DeprecationWarning)
connectivity, n_components = _fix_connectivity(X, connectivity)
connectivity = connectivity.tocoo()
# Put the diagonal to zero
diag_mask = (connectivity.row != connectivity.col)
connectivity.row = connectivity.row[diag_mask]
connectivity.col = connectivity.col[diag_mask]
connectivity.data = connectivity.data[diag_mask]
del diag_mask
if affinity == 'precomputed':
distances = X[connectivity.row, connectivity.col]
else:
# FIXME We compute all the distances, while we could have only computed
# the "interesting" distances
distances = paired_distances(X[connectivity.row],
X[connectivity.col],
metric=affinity)
connectivity.data = distances
if n_clusters is None:
n_nodes = 2 * n_samples - 1
else:
assert n_clusters <= n_samples
n_nodes = 2 * n_samples - n_clusters
if return_distance:
distances = np.empty(n_nodes - n_samples)
# create inertia heap and connection matrix
A = np.empty(n_nodes, dtype=object)
inertia = list()
# LIL seems to be the best format to access the rows quickly,
# without the numpy overhead of slicing CSR indices and data.
connectivity = connectivity.tolil()
# We are storing the graph in a list of IntFloatDict
for ind, (data, row) in enumerate(zip(connectivity.data,
connectivity.rows)):
A[ind] = IntFloatDict(np.asarray(row, dtype=np.intp),
np.asarray(data, dtype=np.float64))
# We keep only the upper triangular for the heap
# Generator expressions are faster than arrays on the following
inertia.extend(_hierarchical.WeightedEdge(d, ind, r)
for r, d in zip(row, data) if r < ind)
del connectivity
heapify(inertia)
# prepare the main fields
parent = np.arange(n_nodes, dtype=np.intp)
used_node = np.ones(n_nodes, dtype=np.intp)
children = []
# recursive merge loop
for k in xrange(n_samples, n_nodes):
# identify the merge
while True:
edge = heappop(inertia)
if used_node[edge.a] and used_node[edge.b]:
break
i = edge.a
j = edge.b
if return_distance:
# store distances
distances[k - n_samples] = edge.weight
parent[i] = parent[j] = k
children.append((i, j))
# Keep track of the number of elements per cluster
n_i = used_node[i]
n_j = used_node[j]
used_node[k] = n_i + n_j
used_node[i] = used_node[j] = False
# update the structure matrix A and the inertia matrix
# a clever 'min', or 'max' operation between A[i] and A[j]
coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
for l, d in coord_col:
A[l].append(k, d)
# Here we use the information from coord_col (containing the
# distances) to update the heap
heappush(inertia, _hierarchical.WeightedEdge(d, k, l))
A[k] = coord_col
# Clear A[i] and A[j] to save memory
A[i] = A[j] = 0
# Separate leaves in children (empty lists up to now)
n_leaves = n_samples
# # return numpy array for efficient caching
children = np.array(children)[:, ::-1]
if return_distance:
return children, n_components, n_leaves, parent, distances
return children, n_components, n_leaves, parent
# Matching names to tree-building strategies
def _complete_linkage(*args, **kwargs):
kwargs['linkage'] = 'complete'
return linkage_tree(*args, **kwargs)
def _average_linkage(*args, **kwargs):
kwargs['linkage'] = 'average'
return linkage_tree(*args, **kwargs)
_TREE_BUILDERS = dict(
ward=ward_tree,
complete=_complete_linkage,
average=_average_linkage,
)
def _hc_cut(n_clusters, children, n_leaves):
"""Function cutting the ward tree for a given number of clusters.
Parameters
----------
n_clusters : int or ndarray
The number of clusters to form.
children : list of pairs. Length of n_nodes
The children of each non-leaf node. Values less than `n_samples` refer
to leaves of the tree. A greater value `i` indicates a node with
children `children[i - n_samples]`.
n_leaves : int
Number of leaves of the tree.
Returns
-------
labels : array [n_samples]
cluster labels for each point
"""
if n_clusters > n_leaves:
raise ValueError('Cannot extract more clusters than samples: '
'%s clusters where given for a tree with %s leaves.'
% (n_clusters, n_leaves))
# In this function, we store nodes as a heap to avoid recomputing
# the max of the nodes: the first element is always the smallest
# We use negated indices as heaps work on smallest elements, and we
# are interested in largest elements
# children[-1] is the root of the tree
nodes = [-(max(children[-1]) + 1)]
for i in xrange(n_clusters - 1):
# As we have a heap, nodes[0] is the smallest element
these_children = children[-nodes[0] - n_leaves]
# Insert the 2 children and remove the largest node
heappush(nodes, -these_children[0])
heappushpop(nodes, -these_children[1])
label = np.zeros(n_leaves, dtype=np.intp)
for i, node in enumerate(nodes):
label[_hierarchical._hc_get_descendent(-node, children, n_leaves)] = i
return label
class AgglomerativeClustering(BaseEstimator, ClusterMixin):
"""
Agglomerative Clustering
Recursively merges the pair of clusters that minimally increases
a given linkage distance.
Parameters
----------
n_clusters : int, default=2
The number of clusters to find.
connectivity : array-like or callable, optional
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
kneighbors_graph. Default is None, i.e, the
hierarchical clustering algorithm is unstructured.
affinity : string or callable, default: "euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or 'precomputed'.
If linkage is "ward", only "euclidean" is accepted.
memory : Instance of joblib.Memory or string (optional)
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
n_components : int (optional)
Number of connected components. If None the number of connected
components is estimated from the connectivity matrix.
NOTE: This parameter is now directly determined from the connectivity
matrix and will be removed in 0.18
compute_full_tree : bool or 'auto' (optional)
Stop early the construction of the tree at n_clusters. This is
useful to decrease computation time if the number of clusters is
not small compared to the number of samples. This option is
useful only when specifying a connectivity matrix. Note also that
when varying the number of clusters and using caching, it may
be advantageous to compute the full tree.
linkage : {"ward", "complete", "average"}, optional, default: "ward"
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of observations. The algorithm will merge
the pairs of clusters that minimize this criterion.
- ward minimizes the variance of the clusters being merged.
- average uses the average of the distances of each observation of
the two sets.
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
pooling_func : callable, default=np.mean
This combines the values of agglomerated features into a single
value, and should accept an array of shape [M, N] and the keyword
argument ``axis=1``, and reduce it to an array of size [M].
Attributes
----------
labels_ : array [n_samples]
cluster labels for each point
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_components_ : int
The estimated number of connected components in the graph.
children_ : array-like, shape (n_nodes-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`
"""
def __init__(self, n_clusters=2, affinity="euclidean",
memory=Memory(cachedir=None, verbose=0),
connectivity=None, n_components=None,
compute_full_tree='auto', linkage='ward',
pooling_func=np.mean):
self.n_clusters = n_clusters
self.memory = memory
self.n_components = n_components
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.affinity = affinity
self.pooling_func = pooling_func
def fit(self, X, y=None):
"""Fit the hierarchical clustering on the data
Parameters
----------
X : array-like, shape = [n_samples, n_features]
The samples a.k.a. observations.
Returns
-------
self
"""
X = check_arrays(X)[0]
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
if self.linkage == "ward" and self.affinity != "euclidean":
raise ValueError("%s was provided as affinity. Ward can only "
"work with euclidean distances." %
(self.affinity, ))
if self.linkage not in _TREE_BUILDERS:
raise ValueError("Unknown linkage type %s."
"Valid options are %s" % (self.linkage,
_TREE_BUILDERS.keys()))
tree_builder = _TREE_BUILDERS[self.linkage]
connectivity = self.connectivity
if self.connectivity is not None:
if callable(self.connectivity):
connectivity = self.connectivity(X)
connectivity = check_arrays(
connectivity, accept_sparse=['csr', 'coo', 'lil'])
n_samples = len(X)
compute_full_tree = self.compute_full_tree
if self.connectivity is None:
compute_full_tree = True
if compute_full_tree == 'auto':
# Early stopping is likely to give a speed up only for
# a large number of clusters. The actual threshold
# implemented here is heuristic
compute_full_tree = self.n_clusters < max(100, .02 * n_samples)
n_clusters = self.n_clusters
if compute_full_tree:
n_clusters = None
# Construct the tree
kwargs = {}
kwargs['return_distance'] = True
if self.linkage != 'ward':
kwargs['linkage'] = self.linkage
kwargs['affinity'] = self.affinity
self.children_, self.n_components_, self.n_leaves_, parents, \
self.distance = memory.cache(tree_builder)(X, connectivity,
n_components=self.n_components,
n_clusters=n_clusters,
**kwargs)
# Cut the tree
if compute_full_tree:
self.labels_ = _hc_cut(self.n_clusters, self.children_,
self.n_leaves_)
else:
labels = _hierarchical.hc_get_heads(parents, copy=False)
# copy to avoid holding a reference on the original array
labels = np.copy(labels[:n_samples])
# Reassign cluster numbers
self.labels_ = np.searchsorted(np.unique(labels), labels)
return self
Below is a simple example showing how to use the modified AgglomerativeClustering class:
import numpy as np
from AgglomerativeClustering import AgglomerativeClustering # make sure to import the modified class above!
d = np.array(
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
)
clustering = AgglomerativeClustering(n_clusters=2, compute_full_tree=True,
affinity='euclidean', linkage='complete')
clustering.fit(d)
print clustering.distance
That example has the following output:
[ 5.19615242 10.39230485]
This can then be compared to a scipy.cluster.hierarchy.linkage implementation:
import numpy as np
from scipy.cluster.hierarchy import linkage
d = np.array(
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
)
print linkage(d, 'complete')
Output:
[[ 1. 2. 5.19615242 2. ]
[ 0. 3. 10.39230485 3. ]]
Just for kicks I decided to follow up on your statement about performance:
from AgglomerativeClustering import AgglomerativeClustering # the modified class from above
from scipy.cluster.hierarchy import linkage
import numpy as np
import time
l = 1000; iters = 50
d = [np.random.random(100) for _ in xrange(1000)]
t = time.time()
for _ in xrange(iters):
clustering = AgglomerativeClustering(n_clusters=l-1,
affinity='euclidean', linkage='complete')
clustering.fit(d)
scikit_time = (time.time() - t) / iters
print 'scikit-learn Time: {0}s'.format(scikit_time)
t = time.time()
for _ in xrange(iters):
linkage(d, 'complete')
scipy_time = (time.time() - t) / iters
print 'SciPy Time: {0}s'.format(scipy_time)
print 'scikit-learn Speedup: {0}'.format(scipy_time / scikit_time)
This gave me the following results:
scikit-learn Time: 0.566560001373s
SciPy Time: 0.497740001678s
scikit-learn Speedup: 0.878530077083
According to these numbers, the scikit-learn implementation takes about 1.14x the execution time of the SciPy implementation (a "speedup" of 0.88x), i.e. SciPy's implementation is roughly 1.14x faster here. It should be noted that:
I modified the original scikit-learn implementation
I only did a small number of iterations
I only tested a small number of test cases (both cluster size as well as number of items per dimension should be tested)
I ran SciPy second, so it had the advantage of obtaining more cache hits on the source data
The two methods don't exactly do the same thing.
With all of that in mind, you should really evaluate which method performs better for your specific application. There are also functional reasons to go with one implementation over the other.
I made a script to do this without modifying sklearn and without recursive functions. Before using it, note that:
The merge distance can sometimes decrease with respect to the children's merge distances. I added three ways to handle those cases: take the max, do nothing, or increase with the l2 norm. The l2 norm logic has not been verified yet. Please check for yourself what suits you best.
Import the packages:
from sklearn.cluster import AgglomerativeClustering
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram
Function to compute weights and distances:
def get_distances(X,model,mode='l2'):
distances = []
weights = []
children=model.children_
dims = (X.shape[1],1)
distCache = {}
weightCache = {}
for childs in children:
c1 = X[childs[0]].reshape(dims)
c2 = X[childs[1]].reshape(dims)
c1Dist = 0
c1W = 1
c2Dist = 0
c2W = 1
if childs[0] in distCache.keys():
c1Dist = distCache[childs[0]]
c1W = weightCache[childs[0]]
if childs[1] in distCache.keys():
c2Dist = distCache[childs[1]]
c2W = weightCache[childs[1]]
d = np.linalg.norm(c1-c2)
cc = ((c1W*c1)+(c2W*c2))/(c1W+c2W)
X = np.vstack((X,cc.T))
newChild_id = X.shape[0]-1
# How to deal with a higher level cluster merge with lower distance:
if mode=='l2': # Increase the higher-level cluster distance using an l2 norm
added_dist = (c1Dist**2+c2Dist**2)**0.5
dNew = (d**2 + added_dist**2)**0.5
elif mode == 'max': # If the previous clusters had a higher distance, use that one
dNew = max(d,c1Dist,c2Dist)
elif mode == 'actual': # Plot the actual distance.
dNew = d
wNew = (c1W + c2W)
distCache[newChild_id] = dNew
weightCache[newChild_id] = wNew
distances.append(dNew)
weights.append( wNew)
return distances, weights
Make sample data of 2 clusters with 2 subclusters:
# Make 4 distributions, two of which form a bigger cluster
X1_1 = np.random.randn(25,2)+[8,1.5]
X1_2 = np.random.randn(25,2)+[8,-1.5]
X2_1 = np.random.randn(25,2)-[8,3]
X2_2 = np.random.randn(25,2)-[8,-3]
# Merge the four distributions
X = np.vstack([X1_1,X1_2,X2_1,X2_2])
# Plot the clusters
colors = ['r']*25 + ['b']*25 + ['g']*25 + ['y']*25
plt.scatter(X[:,0],X[:,1],c=colors)
Sample data:
Fit the clustering model
model = AgglomerativeClustering(n_clusters=2,linkage="ward")
model.fit(X)
Call the function to find the distances, and pass it to the dendrogram
distance, weight = get_distances(X,model)
linkage_matrix = np.column_stack([model.children_, distance, weight]).astype(float)
plt.figure(figsize=(20,10))
dendrogram(linkage_matrix)
plt.show()
Output dendrogram:
Update: I recommend the solution at https://stackoverflow.com/a/47769506/1333621. If you found my attempt useful, please examine Arjun's solution and re-examine your vote.
You will need to generate a "linkage matrix" from the children_ array, where every row in the linkage matrix has the format [idx1, idx2, distance, sample_count].
This is not meant to be a paste-and-run solution, I'm not keeping track of what I needed to import - but it should be pretty clear anyway.
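Before the full walkthrough, here is a minimal, hand-built Z just to illustrate the format dendrogram expects (a hypothetical 3-sample tree, not derived from any real model):
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram
# Each row is [idx1, idx2, distance, sample_count]; row i creates node n_samples + i.
Z = np.array([
    [0., 1., 1.0, 2.],   # merge samples 0 and 1 at distance 1.0 -> node 3
    [2., 3., 2.5, 3.],   # merge sample 2 with node 3 at distance 2.5 -> node 4 (root)
])
dendrogram(Z)
plt.show()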
Here is one way to generate the required structure Z and visualize the result
X is your n_samples x n_features input data
cluster
agg_cluster = sklearn.cluster.AgglomerativeClustering(n_clusters=n)
agg_labels = agg_cluster.fit_predict(X)
some empty data structures
Z = []
# should really call this cluster dict
node_dict = {}
n_samples = len(X)
write a recursive function to gather all leaf nodes associated with a given cluster, compute distances, and centroid positions
def get_all_children(k, verbose=False):
i,j = agg_cluster.children_[k]
if k in node_dict:
return node_dict[k]['children']
if i < n_samples:
left = [i]
else:
# read the AgglomerativeClustering doc. to see why I select i-n_samples
left = get_all_children(i-n_samples)
if j < n_samples:
right = [j]
else:
right = get_all_children(j-n_samples)
if verbose:
print k,i,j,left, right
left_pos = np.mean(map(lambda ii: X[ii], left),axis=0)
right_pos = np.mean(map(lambda ii: X[ii], right),axis=0)
# this assumes that agg_cluster used euclidean distances
dist = metrics.pairwise_distances([left_pos,right_pos],metric='euclidean')[0,1]
all_children = [x for y in [left,right] for x in y]
pos = np.mean(map(lambda ii: X[ii], all_children),axis=0)
# store the results to speed up any additional or recursive evaluations
node_dict[k] = {'top_child':[i,j],'children':all_children, 'pos':pos,'dist':dist, 'node_i':k + n_samples}
return all_children
#return node_dict
populate node_dict and generate Z - with distance and n_samples per node
for k,x in enumerate(agg_cluster.children_):
get_all_children(k,verbose=False)
# Every row in the linkage matrix has the format [idx1, idx2, distance, sample_count].
Z = [[v['top_child'][0],v['top_child'][1],v['dist'],len(v['children'])] for k,v in node_dict.iteritems()]
# create a version with log scaled distances for easier visualization
Z_log =[[v['top_child'][0],v['top_child'][1],np.log(1.0+v['dist']),len(v['children'])] for k,v in node_dict.iteritems()]
plot it using scipy dendrogram
from scipy.cluster import hierarchy
plt.figure()
dn = hierarchy.dendrogram(Z_log,p=4,truncate_mode='level')
plt.show()
be disappointed by how opaque this visualization is and wish you could interactively drill down into larger clusters and examine directional (not scalar) distances between centroids :( - maybe a bokeh solution exists?
references
http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.dendrogram.html
https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/#Selecting-a-Distance-Cut-Off-aka-Determining-the-Number-of-Clusters
I think the official sklearn example for AgglomerativeClustering would be helpful.
Plot Hierarchical Clustering Dendrogram:
import numpy as np
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram
from sklearn.datasets import load_iris
from sklearn.cluster import AgglomerativeClustering
def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples under each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for child_idx in merge:
if child_idx < n_samples:
current_count += 1 # leaf node
else:
current_count += counts[child_idx - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
iris = load_iris()
X = iris.data
# setting distance_threshold=0 ensures we compute the full tree.
model = AgglomerativeClustering(distance_threshold=0, n_clusters=None)
model = model.fit(X)
plt.title('Hierarchical Clustering Dendrogram')
# plot the top three levels of the dendrogram
plot_dendrogram(model, truncate_mode='level', p=3)
plt.xlabel("Number of points in node (or index of point if no parenthesis).")
plt.show()
NB: This solution relies on the distances_ attribute, which is only set when calling AgglomerativeClustering with the distance_threshold parameter.
I ran into the same problem when setting n_clusters.
I think the problem is that if you set n_clusters, the distances don't get evaluated.
If you set n_clusters = None and set a distance_threshold, then it works with the code provided by sklearn.
I understand that this will probably not help in your situation but I hope a fix is underway.
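As a side note, and assuming you are on a newer scikit-learn release (0.24 or later, if I recall correctly), there is also a compute_distances parameter that populates distances_ even when n_clusters is set, so the patching workaround becomes unnecessary; a minimal sketch:
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import load_iris
X = load_iris().data
# compute_distances=True fills distances_ even though n_clusters is given
model = AgglomerativeClustering(n_clusters=3, compute_distances=True).fit(X)
print(model.children_.shape, model.distances_.shape)  # (149, 2) (149,) for iris here
The linkage matrix can then be built exactly as in plot_dendrogram above.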
I have a distance matrix BD_D_R that contains the distance from each element of the main matrix psnr_bitrate to every other element. Each time we have 6 clusters of main elements, and we have to find the element that has the lowest distance to all other elements in its cluster. For this I have to run three nested for loops to find that element for each cluster, which is very time-consuming. Is there a way to find this point for each cluster without these for loops and without calculating the distances again? We calculate the distances once, the number of elements is high, and it is time-consuming. psnr_bitrate is an array with shape (700, 6) and BD_D_R is an array with shape (700, 700); each cluster has maybe 200 or fewer members. If we could extract the indices of the elements in each cluster and look up the distances from the distance matrix without for loops, I think it would work, but I do not know how to do this in Python.
I used the following code for updating the points. As you can see, I implement k-means clustering with my own distance function BD_RATE, and the bottleneck of the code is updating the centroids, where I have to calculate the distance of each node to all other nodes in its cluster. Do you have any suggestions for doing this part without nested for loops?
#####################################################################################
def centroid_initiation(centroid,K,label,psnr_bitrate):
array_part=np.array_split(psnr_bitrate,K)
if label==0:
for i in range(K):
tmp=array_part[i]
tmp=tmp[np.lexsort(np.fliplr(tmp).T)]# sort based on first column without changing other columns
centroid[i,:]=tmp[int(np.floor(len(array_part[i])/2)),:]
return(centroid)
else:
rnd=random.sample(range(0, psnr_bitrate.shape[0]), K)
centroid=psnr_bitrate[rnd]
return centroid
###################################################################################
def kmeans_BD(psnr_bitrate,K,centroid):
m=psnr_bitrate.shape[0]#number of samples
n=psnr_bitrate.shape[1]#number of bitrate
# creating an empty array
BD=np.zeros((m,K))
#weight of BD_rate
wr=0.5
#weight of BD_Q
wq=0.5
n_itr=1000
# finding the distance from each sample to each centroid
for itr in range(n_itr):
for k in range(K):
for i in range(len(psnr_bitrate)):
# print(i)
tmp_return=BD_RATE('VMAF_Y',rate,centroid[k,:],rate,psnr_bitrate[i,:])
if tmp_return==np.NINF:
BD[i,k]=np.inf
else:
BD[i,k]=np.abs(tmp_return)
# a=BD_RATE('VMAF_Y',rate,psnr_bitrate[i,:],rate,centroid[k,:])
# BD_R=bd_rate(rate,psnr_bitrate[i,:],rate,centroid[k,:],i)
# BD_Q=bd_PSNR(rate,psnr_bitrate[i,:],rate,centroid[k,:],i)
# BD[i,k]=wr*BD_R+wq*BD_Q
#remove rows in which all columns have inf value and put them in a new cluster
finding_non_infValue=np.asarray(BD[BD.min(axis=1) != np.inf])
indx_removed=np.where([BD.min(axis=1) == np.inf])[1]
indx_nonremoved=np.where([BD.min(axis=1) != np.inf])[1]
removed_member=[psnr_bitrate[x] for x in indx_removed]
saved_member=[psnr_bitrate[x] for x in indx_nonremoved]
# storing the minimum value we have computed
minimum=np.argmin(finding_non_infValue,axis=1)+1
minimum_distance=finding_non_infValue.min(axis=1)
minimum_merge=np.zeros((minimum.shape[0],2))
minimum_merge[:,0]=minimum
minimum_merge[:,1]=minimum_distance
# computing the mean of separated clusters
clusters={}
for itr1 in range(K):
clusters[itr1+1]=np.array([]).reshape(n,0)
# assigning of clusters to points
for itr1 in range(len(saved_member)):
clusters[minimum[itr1]]=np.c_[clusters[minimum[itr1]],saved_member[itr1]]
for itr1 in range(K):
clusters[itr1+1]=clusters[itr1+1].T
clusters_tmp=np.zeros((K,1))
for itr1 in range(K):
if clusters[itr1+1].shape[0]!=[]:
clusters_tmp[itr1]=clusters[itr1+1].shape[0]
else:
clusters_tmp[itr1]=-1
num=(clusters_tmp==-1).sum()#[itr4 for itr4, x in clusters_tmp if x == -1]
if num==K:
centroid=centroid_initiation(centroid,K,label,psnr_bitrate)
else:
if num>0:
tmp_idx=0
while num>0:
indx=np.where([clusters_tmp ==-1 ])[1]+1 #index of no member cluster
H_cluster=np.argmax(clusters_tmp)+1# index of cluster with more members
tmp=np.where(minimum_merge == max([i for i in minimum_merge if i[0]==H_cluster], key=lambda x : x[1]))
idx,=np.where(tmp[1]==1)
clusters[indx[tmp_idx]][0]=clusters[H_cluster][idx]#add member to other cluster
clusters[H_cluster]=np.delete(clusters[H_cluster],(idx),axis=0)#remove the member from main cluster
num-=1
tmp_idx+=1
# computing mean and updating it
for itr2 in range(K):
tmp_cl=clusters[itr2+1]
if len(tmp_cl)>1:
BD_cent=np.zeros((len(tmp_cl),1))
for itr3 in range(len(tmp_cl)):
sumv=0
for itr5 in range(len(tmp_cl)):
value=BD_RATE('VMAF_Y',rate,tmp_cl[itr3,:],rate,tmp_cl[itr5,:])
if value!=np.NINF:
sumv+=np.abs(value)
else:
sumv+=1000#for curve which have not overlap with others
BD_cent[itr3]=sumv/len(tmp_cl)
new_centroid_index=np.argmin(BD_cent)
centroid[itr2]=clusters[itr2+1][new_centroid_index]
# a=BD_RATE('VMAF_Y',rate,psnr_bitrate[i,:],rate,centroid[k,:])
# BD_R=bd_rate(rate,psnr_bitrate[i,:],rate,centroid[k,:],i)
# BD_Q=bd_PSNR(rate,psnr_bitrate[i,:],rate,centroid[k,:],i)
# BD[i,k]=wr*BD_R+wq*BD_Q
# storing the minimum value we have computed
scaled_features = pd.DataFrame((psnr_bitrate))
scaled_features['cluster'] =minimum
pd.plotting.parallel_coordinates(scaled_features, 'cluster')
psnr_bitrate=np.vstack([np.loadtxt(path, dtype='float') for path in glob.iglob(r'C:/Users/jamalm8/ffmpeg/UGCvideos/*.txt')])
#remove non monotonic curve
itr=0
m=len(psnr_bitrate)
while (itr<len(psnr_bitrate)):
brqtypairs1=psnr_bitrate[itr,:]
rd1_monotonic = all(x<y for x, y in zip(brqtypairs1, brqtypairs1[1:]))
if (rd1_monotonic == False ):
print (itr)
psnr_bitrate=np.delete(psnr_bitrate, (itr),axis=0)
itr=itr-1
itr=itr+1
m=psnr_bitrate.shape[0]#number of samples
n=psnr_bitrate.shape[1]#number of bitrate
K=6 #number of cluster
rate=[1000,2000,3000,4000,5000,6000]
#first initialization of cluster centroids
#the median of each group is considered as centroid
array_part=np.array_split(psnr_bitrate,K)
centroid=np.zeros((K,6))
label=0
centroid=centroid_initiation(centroid,K,label,psnr_bitrate)
label+=1
# rnd=random.sample(range(0, 638), K)
# centroid=psnr_bitrate[rnd]
# creating an empty array
#weight of BD_rate
wr=0.5
#weight of BD_Q
wq=0.5
kmeans_BD(psnr_bitrate, K, centroid)
I am trying to create a directed network with more than 5000 nodes. The edges between the nodes are based on the difference in a certain value assigned to each node; if the difference in values between a node pair is less than a threshold, there is an edge. I generate an adjacency matrix and want to check whether the directed graph is weakly connected, and also compute PageRank. Currently, I use the code below to generate the graph; it takes 78 s and occupies nearly 7 GB of memory. I want to know if there is a more efficient (time and memory) way of constructing and evaluating large networks in Python.
%reset -f
!pip install faiss-gpu
import faiss
import numpy as np
import torch
import random
import networkx as nx
import time
device='cuda'
res = faiss.StandardGpuResources()
start=time.time()
# Total Nodes
N = 5000
# Mean
mu = 0.5*np.pi
# Variance
var = np.pi/18
# Maximum degree of each node
max_degree = 1000
# Threshold
value_thres = np.pi/6
# Placeholders
Values = torch.zeros((N,1),dtype=torch.double,device='cuda')
Matrixs = torch.zeros((2,N,max_degree),dtype=torch.double,device='cuda')
Adj_Matrix = torch.zeros((N,N),dtype=torch.long,device='cuda')
#Generate a directed network with N nodes whose connectivity is based on values
start_network=time.time()
Values[:,0] = torch.normal(mu,var,(N,))
# Find neighbors upto max_degree
# Pytorch to numpy
Current = np.float32(Values[:,0].cpu().detach().numpy())
index_flat = faiss.IndexFlatL2(Current[:,None].shape[1])
gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index_flat)
gpu_index_flat.add(Current[:,None])
m, n = gpu_index_flat.search(Current[:,None], max_degree)
# Indices of possible neighbors
Matrixs[1] = torch.from_numpy(n).long()
# Value Separation
Matrixs[0] = torch.squeeze(torch.cdist(Values[:,0][:,None][:,None],Values[:,0][:,None][Matrixs[1].long()],p=2))<value_thres
# Construct Adjacency Matrix
Adj_Matrix[Matrixs[1].long()] = 1
Adj_Matrix-=torch.eye(N,dtype=torch.long,device='cuda')
G = nx.from_numpy_matrix(Adj_Matrix.cpu().detach().numpy())
end=time.time()
print('Network Creation Time',end-start_network)
print('Total Time',end-start-start_network)
From the snippet in the question, it's hard to isolate the time/memory cost of networkx (I don't have 'cuda' on my machine, so I'm unable to replicate it). However, the following code runs for about 36 seconds:
import networkx as nx
import numpy as np
A = np.random.randint(2, size=(5000, 5000))
G = nx.from_numpy_matrix(A) # about 36 seconds
There could be scope for generating a faster algorithm by writing a custom low-level graph constructor, but it's unlikely that it will have memory advantages.
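One thing that may help on both time and memory, though I haven't benchmarked it against the GPU setup in the question, is to avoid materialising the dense N x N adjacency at all and hand NetworkX a sparse matrix instead. A sketch (the converter name depends on your NetworkX version: newer releases use from_scipy_sparse_array, older ones from_scipy_sparse_matrix):
import numpy as np
import networkx as nx
from scipy import sparse
N = 5000
values = np.random.normal(0.5 * np.pi, np.pi / 18, N)
thresh = np.pi / 6
# Build the edge list blockwise so only a (block x N) slab is in memory at a time.
rows, cols = [], []
block = 500
for start in range(0, N, block):
    diff = np.abs(values[start:start + block, None] - values[None, :])
    r, c = np.nonzero(diff < thresh)
    rows.append(r + start)
    cols.append(c)
rows = np.concatenate(rows)
cols = np.concatenate(cols)
mask = rows != cols  # drop self-loops
A = sparse.coo_matrix((np.ones(mask.sum()), (rows[mask], cols[mask])), shape=(N, N))
# Note: with the question's parameters most pairs pass the threshold, so the graph is
# nearly dense and the gain is limited; for sparser thresholds this skips the dense
# converter entirely.
G = nx.from_scipy_sparse_array(A, create_using=nx.DiGraph)
print(nx.is_weakly_connected(G), len(nx.pagerank(G)))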
I'm trying to vectorize my fitness function for a Minimum Vector Cover genetic algorithm, but I'm at a loss about how to do it.
As it stands now:
vert_cover_fitness = [1 if self.dna[edge[0]] or self.dna[edge[1]] else -num_edges for edge in edges]
The dna is a one-dimensional binary array of size [0..n], where each index corresponds to a vertex, and its value indicates if we have chosen it or not. edges is a two dimensional positive integer array, where each value corresponds to a vertex (index) in dna. Both are ndarrays.
Simply explained - if one of the vertices connected by an edge is "selected", then we get a score of one. If not, the function is penalized by -num_edges.
I have tried np.vectorize as an attempt to get away cheap with a lambda function:
fit_func = np.vectorize(lambda edge: 1 if self.dna[edge[0]] or self.dna[edge[1]] else -num_edges)
vert_cover_fitness = fit_func(edges)
This returns IndexError: invalid index to scalar variable, since the function is applied to each value rather than to each row.
To fix this I tried np.apply_along_axis. This works but it's just a wrapper for a loop so I'm not getting any speedups.
If any Numpy wizards can see some obvious way to do this, I would much appreciate your help. I'm guessing a problem lies with the representation of the problem, and that changing either the dna or edges shapes could help. I'm just not skilled enough to see what I should do.
I came up with this bit of numpy code; it runs about 30x faster than your for loop on my randomly generated data.
import numpy as np
num_vertices = 1000
num_edges = 500
dna = np.random.choice([0, 1], num_vertices)
edges = np.random.randint(0, num_vertices, num_edges * 2).reshape(-1, 2)
vert_cover_fitness1 = [1 if dna[edge[0]] or dna[edge[1]] else -num_edges for edge in edges]
vert_cover_fitness2 = np.full([num_edges], -num_edges)
mask = (dna[edges[:, 0]] | dna[edges[:, 1]]).astype(bool)
vert_cover_fitness2[mask] = 1.0
print((vert_cover_fitness1 == vert_cover_fitness2).all()) # this shows it's correct
Here is the timeit code used to measure the speedup.
import timeit
setup = """
import numpy as np
num_vertices = 1000
num_edges = 500
dna = np.random.choice([0, 1], num_vertices)
edges = np.random.randint(0, num_vertices, num_edges*2).reshape(-1, 2)
"""
python_loop = "[1 if dna[edge[0]] or dna[edge[1]] else -num_edges for edge in edges]"
print(timeit.timeit(python_loop, setup, number=1000))
vectorised="""
vert_cover_fitness2 = np.full([num_edges], -num_edges)
mask = (dna[edges[:, 0]] | dna[edges[:, 1]]).astype(bool)
vert_cover_fitness2[mask] = 1.0
"""
print(timeit.timeit(vectorised, setup, number=1000))
# prints:
# 0.375906624016352
# 0.012783741112798452
I have a list of points P = [p1, ..., pN] where pi = (latitude_i, longitude_i).
Using Python 3, I would like to find a smallest set of clusters (disjoint subsets of P) such that every member of a cluster is within 20km of every other member in the cluster.
Distance between two points is computed using the Vincenty method.
To make this a little more concrete, suppose I have a set of points such as
from numpy import *
points = array([[33. , 41. ],
[33.9693, 41.3923],
[33.6074, 41.277 ],
[34.4823, 41.919 ],
[34.3702, 41.1424],
[34.3931, 41.078 ],
[34.2377, 41.0576],
[34.2395, 41.0211],
[34.4443, 41.3499],
[34.3812, 40.9793]])
Then I am trying to define this function:
from geopy.distance import vincenty
def clusters(points, distance):
"""Returns smallest list of clusters [C1,C2...Cn] such that
for x,y in Ci, vincenty(x,y).km <= distance """
return [points] # Incorrect but gives the form of the output
NOTE: Many questions cluster on geo location plus attributes; my question is for location only. This is for lat/lon, not Euclidean distance. There are other questions that give sort-of answers but not an answer to this question (many are unanswered):
https://datascience.stackexchange.com/questions/761/clustering-geo-location-coordinates-lat-long-pairs
https://gis.stackexchange.com/questions/300171/clustering-geo-points-and-export-borders-in-kml
https://gis.stackexchange.com/questions/194873/clustering-geographical-data-based-on-point-location-and-associated-point-values
https://gis.stackexchange.com/questions/256477/clustering-latitude-longitude-data-based-on-distance
and more, none of which answer this question.
This might be a start. The algorithm attempts to k-means cluster the points, iterating k from 2 up to the number of points and validating each solution along the way. You should pick the lowest valid number.
It works by clustering the points and then checking that each cluster obeys the constraint. If any cluster is not compliant, the solution is labeled False and we move on to the next number of clusters.
Because the k-means algorithm used in sklearn can fall into local minima, whether the solution found this way is the best one remains to be established, but it could be one.
import numpy as np
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
import math
points = np.array([[33. , 41. ],
[33.9693, 41.3923],
[33.6074, 41.277 ],
[34.4823, 41.919 ],
[34.3702, 41.1424],
[34.3931, 41.078 ],
[34.2377, 41.0576],
[34.2395, 41.0211],
[34.4443, 41.3499],
[34.3812, 40.9793]])
def distance(origin, destination): #found here https://gist.github.com/rochacbruno/2883505
lat1, lon1 = origin[0],origin[1]
lat2, lon2 = destination[0],destination[1]
radius = 6371 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
def create_clusters(number_of_clusters,points):
kmeans = KMeans(n_clusters=number_of_clusters, random_state=0).fit(points)
l_array = np.array([[label] for label in kmeans.labels_])
clusters = np.append(points,l_array,axis=1)
return clusters
def validate_solution(max_dist,clusters):
_, __, n_clust = clusters.max(axis=0)
n_clust = int(n_clust)
for i in range(n_clust + 1): # labels run from 0 to n_clust inclusive
two_d_cluster=clusters[clusters[:,2] == i][:,np.array([True, True, False])]
if not validate_cluster(max_dist,two_d_cluster):
return False
else:
continue
return True
def validate_cluster(max_dist,cluster):
distances = cdist(cluster,cluster, lambda ori,des: int(round(distance(ori,des))))
print(distances)
print(30*'-')
for item in distances.flatten():
if item > max_dist:
return False
return True
if __name__ == '__main__':
for i in range(2,len(points)):
print(i)
print(validate_solution(20,create_clusters(i,points)))
Once a benchmark is established, one would have to focus on each cluster to establish whether its points could be distributed to other clusters without violating the distance constraint.
You can replace the lambda function in cdist with whatever distance metric you choose; I found the great-circle distance in the gist I mentioned.
Here is a solution that seems correct and behaves O(N^2) in the worst case, and better depending on the data:
def my_cluster(S,distance):
coords=set(S)
C=[]
while len(coords):
locus=coords.pop()
cluster = [x for x in coords if vincenty(locus,x).km <= distance]
C.append(cluster+[locus])
for x in cluster:
coords.remove(x)
return C
NOTE: I am not marking this as an answer because one of my requirements is that it be a smallest set of clusters. My first pass is good but I haven't proven that it is a smallest set.
The result (on a larger set of points) can be visualized as follows:
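A minimal sketch of such a plot (the original answer displayed an image here); it assumes the my_cluster function and points array from above, and that geopy's vincenty is available (newer geopy versions replace it with geodesic):
import matplotlib.pyplot as plt
C = my_cluster(set(map(tuple, points)), 20)
for i, cluster in enumerate(C):
    lats = [p[0] for p in cluster]
    lons = [p[1] for p in cluster]
    plt.scatter(lons, lats, label='cluster %d' % i)
plt.legend()
plt.show()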
Why not use the S2 library to create the 20 km zones and see which points are in each?
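A rough sketch of that idea, assuming the pure-Python s2sphere package (its API, and the cell level that gives roughly 20 km cells, are assumptions here): bucket each point by its S2 cell and treat each bucket as a candidate zone. Note this only approximates the pairwise-distance constraint; points near a cell border still need an explicit distance check.
from collections import defaultdict
import s2sphere  # assumed package: a pure-Python port of the S2 geometry library
def s2_buckets(points, level=9):  # level 9/10 cells are roughly tens of km across (approximate)
    buckets = defaultdict(list)
    for lat, lng in points:
        cell = s2sphere.CellId.from_lat_lng(
            s2sphere.LatLng.from_degrees(lat, lng)).parent(level)
        buckets[cell.id()].append((lat, lng))
    return list(buckets.values())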
I am trying to implement this algorithm to find the intercept and slope for a single variable:
Here is my Python code to update the intercept and slope, but it is not converging. RSS is increasing with each iteration rather than decreasing, and after some iterations it becomes infinite. I cannot find any error in my implementation of the algorithm. How can I solve this problem? I have attached the CSV file too.
Here is the code.
import pandas as pd
import numpy as np
#Defining gradient_decend
#This Function takes X value, Y value and vector of w0(intercept),w1(slope)
#INPUT FEATURES=X(sq.feet of house size)
#TARGET VALUE=Y (Price of House)
#W=np.array([w0,w1]).reshape(2,1)
#W=[w0,
# w1]
def gradient_decend(X,Y,W):
intercept=W[0][0]
slope=W[1][0]
#Here i will get a list
#list is like this
#gd=[sum(predicted_value-(intercept+slope*x)),
# sum(predicted_value-(intercept+slope*x)*x)]
gd=[sum(y-(intercept+slope*x) for x,y in zip(X,Y)),
sum(((y-(intercept+slope*x))*x) for x,y in zip(X,Y))]
return np.array(gd).reshape(2,1)
#Defining Resudual sum of squares
def RSS(X,Y,W):
return sum((y-(W[0][0]+W[1][0]*x))**2 for x,y in zip(X,Y))
#Reading Training Data
training_data=pd.read_csv("kc_house_train_data.csv")
#Defining fixed parameters
#Learning Rate
n=0.0001
iteration=1500
#Intercept
w0=0
#Slope
w1=0
#Creating 2,1 vector of w0,w1 parameters
W=np.array([w0,w1]).reshape(2,1)
#Running gradient Decend
for i in range(iteration):
W=W+((2*n)* (gradient_decend(training_data["sqft_living"],training_data["price"],W)))
print RSS(training_data["sqft_living"],training_data["price"],W)
Here is the CSV file.
Firstly, I find that when writing machine learning code, it's best NOT to use complex list comprehensions, because for anything you can iterate over:
it's easier to read if written with normal loops and indentation, and/or
it can be done with numpy broadcasting
And using proper variable names can help you better understand the code. Using Xs, Ys, Ws as shorthand is nice only if you're good at math. Personally, I don't use them in code, especially when writing in Python. From import this: explicit is better than implicit.
My rule of thumb is to remember that if I write code I can't read 1 week later, it's bad code.
First, let's decide what is the input parameters for gradient descent, you will need:
feature_matrix (The X matrix, type: numpy.array, a matrix of N * D size, where N is the no. of rows/datapoints and D is the no. of columns/features)
output (The Y vector, type: numpy.array, a vector of size N)
initial_weights (type: numpy.array, a vector of size D).
Additionally, to check for convergence you will need:
step_size (the magnitude of change when iterating through to change the weights; type: float, usually a small number)
tolerance (the criterion to break the iterations: when the gradient magnitude is smaller than the tolerance, assume that your weights have converged; type: float, usually a small number but much bigger than the step size).
Now to the code.
def regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance):
converged = False # Set a boolean to check for convergence
weights = np.array(initial_weights) # make sure it's a numpy array
while not converged:
# compute the predictions based on feature_matrix and weights.
# iterate through the row and find the single scalar predicted
# value for each weight * column.
# hint: a dot product can solve this easily
predictions = [??? for row in feature_matrix]
# compute the errors as predictions - output
errors = predictions - output
gradient_sum_squares = 0 # initialize the gradient sum of squares
# while we haven't reached the tolerance yet, update each feature's weight
for i in range(len(weights)): # loop over each weight
# Recall that feature_matrix[:, i] is the feature column associated with weights[i]
# compute the derivative for weight[i]:
# Hint: the derivative is = 2 * dot product of feature_column and errors.
derivative = 2 * ????
# add the squared value of the derivative to the gradient magnitude (for assessing convergence)
gradient_sum_squares += (derivative * derivative)
# subtract the step size times the derivative from the current weight
weights[i] -= (step_size * derivative)
# compute the square-root of the gradient sum of squares to get the gradient magnitude:
gradient_magnitude = ???
# Then check whether the magnitude is lower than the tolerance.
if ???:
converged = True
# Once the while loop breaks, return the weights.
return(weights)
I hope the extended pseudo-code helps you better understand the gradient descent. I won't fill in the ??? so as to not spoil your homework.
Note that your RSS code is also unreadable and unmaintainable. It's easier to do just:
>>> import numpy as np
>>> prediction = np.array([1,2,3])
>>> output = np.array([1,1,5])
>>> residual = output - prediction
>>> RSS = sum(residual * residual)
>>> RSS
5
Going through numpy basics will go a long way to machine learning and matrix-vector manipulation without going nuts with iterations: http://docs.scipy.org/doc/numpy-1.10.1/user/basics.html
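To make the broadcasting point concrete, here is a tiny self-contained illustration (the numbers are made up, not from the course data): the whole prediction/error/RSS step is a couple of matrix-vector products, with no Python loop.
import numpy as np
feature_matrix = np.array([[1., 1180.], [1., 2570.], [1., 770.]])  # constant + sqft (made up)
output = np.array([221900., 538000., 180000.])                     # made-up prices
weights = np.array([-47000., 280.])
predictions = feature_matrix.dot(weights)   # shape (3,), one prediction per row
errors = predictions - output
rss = errors.dot(errors)
print(rss)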
I have solved my own problem!
Here is the solved way.
import numpy as np
import pandas as pd
import math
from sys import stdout
#function Takes the pandas dataframe, Input features list and the target column name
def get_numpy_data(data, features, output):
#Adding a constant column with value 1 in the dataframe.
data['constant'] = 1
#Adding the name of the constant column in the feature list.
features = ['constant'] + features
#Creating Feature matrix(Selecting columns and converting to matrix).
features_matrix=data[features].as_matrix()
#Target column is converted to the numpy array
output_array=np.array(data[output])
return(features_matrix, output_array)
def predict_outcome(feature_matrix, weights):
weights=np.array(weights)
predictions = np.dot(feature_matrix, weights)
return predictions
def errors(output,predictions):
errors=predictions-output
return errors
def feature_derivative(errors, feature):
derivative=np.dot(2,np.dot(feature,errors))
return derivative
def regression_gradient_descent(feature_matrix, output, initial_weights, step_size, tolerance):
converged = False
#Initital weights are converted to numpy array
weights = np.array(initial_weights)
while not converged:
# compute the predictions based on feature_matrix and weights:
predictions=predict_outcome(feature_matrix,weights)
# compute the errors as predictions - output:
error=errors(output,predictions)
gradient_sum_squares = 0 # initialize the gradient
# while not converged, update each weight individually:
for i in range(len(weights)):
# Recall that feature_matrix[:, i] is the feature column associated with weights[i]
feature=feature_matrix[:, i]
# compute the derivative for weight[i]:
#predict=predict_outcome(feature,weights[i])
#err=errors(output,predict)
deriv=feature_derivative(error,feature)
# add the squared derivative to the gradient magnitude
gradient_sum_squares=gradient_sum_squares+(deriv**2)
# update the weight based on step size and derivative:
weights[i]=weights[i] - np.dot(step_size,deriv)
gradient_magnitude = math.sqrt(gradient_sum_squares)
stdout.write("\r%d" % int(gradient_magnitude))
stdout.flush()
if gradient_magnitude < tolerance:
converged = True
return(weights)
#Example of Implementation
#Importing Training and Testing Data
# train_data=pd.read_csv("kc_house_train_data.csv")
# test_data=pd.read_csv("kc_house_test_data.csv")
# simple_features = ['sqft_living', 'sqft_living15']
# my_output= 'price'
# (simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)
# initial_weights = np.array([-100000., 1., 1.])
# step_size = 7e-12
# tolerance = 2.5e7
# simple_weights = regression_gradient_descent(simple_feature_matrix, output,initial_weights, step_size,tolerance)
# print simple_weights
It is so simple
def mean(values):
return sum(values)/float(len(values))
def variance(values, mean):
return sum([(x-mean)**2 for x in values])
def covariance(x, mean_x, y, mean_y):
covar = 0.0
for i in range(len(x)):
covar+=(x[i]-mean_x) * (y[i]-mean_y)
return covar
def coefficients(dataset):
x = []
y = []
for line in dataset:
xi, yi = map(float, line.split(','))
x.append(xi)
y.append(yi)
dataset.close()
x_mean, y_mean = mean(x), mean(y)
b1 = covariance(x, x_mean, y, y_mean)/variance(x, x_mean)
b0 = y_mean-b1*x_mean
return [b0, b1]
dataset = open('trainingdata.txt')
b0, b1 = coefficients(dataset)
n=float(raw_input())
print(b0+b1*n)
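As a quick sanity check of the closed-form coefficients, numpy gives the same slope and intercept on made-up data (np.polyfit returns the slope first for degree 1):
import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([2.1, 4.2, 5.9, 8.1])
slope, intercept = np.polyfit(x, y, 1)
print(intercept + slope * 2.5)  # prediction at x = 2.5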
Reference: www.machinelearningmastery.com/implement-simple-linear-regression-scratch-python/