from sklearn.cluster import KMeans
import numpy as np
from scipy.optimize import minimize
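
# --- Helper functions --------------------------------------------------------
# The class below calls normalized_laplacian, eigenvectors, eigenvalues,
# distance_matrix, and best_simplex_projection, none of which are defined in
# the original listing. The sketches here are one plausible set of
# implementations inferred from the call sites; treat them as assumptions,
# not as the original author's code.
from scipy.linalg import eigh
from scipy.spatial.distance import cdist


def normalized_laplacian(S):
    # Symmetric normalized Laplacian: L = I - D^{-1/2} S D^{-1/2}.
    degrees = S.sum(axis=1)
    with np.errstate(divide='ignore'):
        # Guard against isolated nodes (zero degree).
        inv_sqrt_degrees = np.where(degrees > 0, 1.0 / np.sqrt(degrees), 0.0)
    D_inv_sqrt = np.diag(inv_sqrt_degrees)
    return np.eye(S.shape[0]) - D_inv_sqrt @ S @ D_inv_sqrt


def eigenvalues(matrix, num):
    # The `num` smallest eigenvalues of a symmetric matrix, ascending.
    return eigh(matrix, eigvals_only=True, subset_by_index=[0, num - 1])


def eigenvectors(matrix, num):
    # Eigenvectors for the `num` smallest eigenvalues, stacked as columns.
    _, vectors = eigh(matrix, subset_by_index=[0, num - 1])
    return vectors


def distance_matrix(F, metric='euclidean'):
    # Pairwise distances between the rows of F.
    return cdist(F, F, metric=metric)


def best_simplex_projection(v):
    # Euclidean projection of v onto the probability simplex
    # {x : x >= 0, sum(x) = 1}, via the sort-based algorithm of
    # Wang & Carreira-Perpinan (2013).
    sorted_v = np.sort(v)[::-1]
    cumulative = np.cumsum(sorted_v) - 1
    rho = np.nonzero(sorted_v - cumulative / np.arange(1, len(v) + 1) > 0)[0][-1]
    theta = cumulative[rho] / (rho + 1.0)
    return np.maximum(v - theta, 0)
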
class alg:
    """Learns a consensus similarity matrix S and per-metapath weights
    from a dict of metapath -> similarity matrix, then clusters with
    k-means."""

    def __init__(self, similarity_matrices, num_clusters, random_seed=0):
        self.num_clusters = num_clusters
        self.random_seed = random_seed
        self.num_nodes = None
        self.similarity_matrices = []
        self.metapath_index = {}
        self.alpha = None
        self.beta = None
        self.gamma = None
        for index, (metapath, matrix) in enumerate(similarity_matrices.items()):
            if self.num_nodes is None:
                self.num_nodes = matrix.shape[0]
            if matrix.shape != (self.num_nodes, self.num_nodes):
                raise ValueError('Invalid shape of similarity matrix.')
            # Row-normalize so each node's similarities sum to 1
            # (assumes no all-zero rows).
            row_normalized_matrix = matrix / matrix.sum(axis=1, keepdims=True)
            self.similarity_matrices.append(row_normalized_matrix)
            self.metapath_index[metapath] = index
        self.similarity_matrices = np.array(self.similarity_matrices)
        self.num_metapaths = len(similarity_matrices)
    def run(self, verbose=False, cluster_using='similarity'):
        if cluster_using not in ('similarity', 'laplacian'):
            raise ValueError("Invalid option for parameter 'cluster_using'.")
        similarity_matrix, metapath_weights = self.optimize(verbose=verbose)
        if cluster_using == 'similarity':
            # Cluster the rows of the learned similarity matrix directly.
            labels = self.cluster(similarity_matrix)
        else:
            # Classic spectral clustering: cluster the rows of the bottom
            # eigenvectors of the normalized Laplacian.
            laplacian = normalized_laplacian(similarity_matrix)
            labels = self.cluster(eigenvectors(laplacian, num=self.num_clusters))
        metapath_weights_dict = {metapath: metapath_weights[index]
                                 for metapath, index in self.metapath_index.items()}
        return labels, similarity_matrix, metapath_weights_dict
    def cluster(self, feature_matrix):
        # k-means on the rows of the given feature matrix.
        return KMeans(n_clusters=self.num_clusters, n_init=10,
                      random_state=self.random_seed).fit_predict(feature_matrix)
    def optimize(self, num_iterations=20, alpha=0.5, beta=10, gamma=0.01, verbose=False):
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        # Start from uniform metapath weights.
        lambdas = np.ones(self.num_metapaths) / self.num_metapaths
        W = np.tensordot(lambdas, self.similarity_matrices, axes=[[0], [0]])
        S = W.copy()
        # Alternating minimization over F, S, and the metapath weights.
        for iteration in range(num_iterations):
            if verbose:
                # Objective: ||S - W||_F^2 + alpha * ||S||_F^2
                #            + beta * ||lambda||^2
                #            + gamma * (sum of the num_clusters smallest
                #                       eigenvalues of the Laplacian of S).
                loss = np.trace(np.matmul((S - W).T, (S - W)))
                loss += self.alpha * np.trace(np.matmul(S.T, S))
                loss += self.beta * np.dot(lambdas, lambdas)
                loss += self.gamma * np.sum(eigenvalues(normalized_laplacian(S), num=self.num_clusters))
                print('Iteration %d: Loss = %0.3f' % (iteration, loss))
            F = self.optimize_F(S)
            S = self.optimize_S(W, F)
            lambdas = self.optimize_lambdas(S, lambdas)
            W = np.tensordot(lambdas, self.similarity_matrices, axes=[[0], [0]])
        return S, lambdas
    def optimize_F(self, S):
        # F-step: with S fixed, the optimal F stacks the eigenvectors of the
        # normalized Laplacian for the num_clusters smallest eigenvalues.
        LS = normalized_laplacian(S)
        return eigenvectors(LS, num=self.num_clusters)
    def optimize_S(self, W, F):
        # S-step: penalize similarity between nodes whose spectral embeddings
        # (rows of F) are far apart, then project each row of the
        # unconstrained optimum back onto the probability simplex.
        Q = distance_matrix(F, metric='euclidean')
        P = (2 * W - self.gamma * Q) / (2 + 2 * self.alpha)
        S = np.zeros((self.num_nodes, self.num_nodes))
        for index in range(S.shape[0]):
            S[index] = best_simplex_projection(P[index])
        return S
    def optimize_lambdas(self, S, init_lambdas):
        # lambda-step: fit the weighted combination W = sum_i lambda_i * W_i
        # to the current S, with an L2 penalty on the weights, subject to
        # lambda >= 0 and sum(lambda) = 1.
        def objective(lambdas):
            W = np.tensordot(lambdas, self.similarity_matrices, axes=[[0], [0]])
            value = np.trace(np.matmul(W.T, W))
            value -= 2 * np.trace(np.matmul(S.T, W))
            value += self.beta * np.dot(lambdas, lambdas)
            return value

        constraints = {'type': 'eq', 'fun': lambda lambdas: np.sum(lambdas) - 1}
        bounds = [(0, 1)] * self.num_metapaths
        return minimize(objective, init_lambdas, method='SLSQP',
                        constraints=constraints, bounds=bounds).x
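

# --- Example usage -----------------------------------------------------------
# A minimal sketch on synthetic data, not part of the original listing: two
# random symmetric matrices stand in for metapath-based similarities, and the
# metapath names ('APA', 'APCPA') are hypothetical placeholders.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    num_nodes = 30
    sims = {}
    for metapath in ('APA', 'APCPA'):
        M = rng.random((num_nodes, num_nodes))
        sims[metapath] = (M + M.T) / 2  # symmetrize
    model = alg(sims, num_clusters=3)
    labels, S, weights = model.run(verbose=True)
    print('cluster labels:', labels)
    print('metapath weights:', weights)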