From c8cf0fe04540f80a0ec0b2ff8db24dda25f08c67 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Fri, 21 Jun 2024 15:57:34 +0200 Subject: [PATCH 01/10] add stats --- frontend/clusters.py | 63 +++++++++++++++++++++++++++++ frontend/pages/clustering_dbscan.py | 11 ++--- frontend/pages/clustering_kmeans.py | 14 +++---- 3 files changed, 74 insertions(+), 14 deletions(-) create mode 100644 frontend/clusters.py diff --git a/frontend/clusters.py b/frontend/clusters.py new file mode 100644 index 0000000..ac2af4c --- /dev/null +++ b/frontend/clusters.py @@ -0,0 +1,63 @@ +from sklearn.cluster import DBSCAN, KMeans +import numpy as np + +class DBSCAN_cluster(): + def __init__(self, eps, min_samples,data): + self.eps = eps + self.min_samples = min_samples + self.data = data + self.labels = np.array([]) + + def run(self): + dbscan = DBSCAN(eps=self.eps, min_samples=self.min_samples) + self.labels = dbscan.fit_predict(self.data) + return self.labels + + def get_stats(self): + unique_labels = np.unique(self.labels) + stats = [] + for label in unique_labels: + if label == -1: + continue + cluster_points = self.data[self.labels == label] + num_points = len(cluster_points) + density = num_points / (np.max(cluster_points, axis=0) - np.min(cluster_points, axis=0)).prod() + stats.append({ + "cluster": label, + "num_points": num_points, + "density": density + }) + + return stats + + +class KMeans_cluster(): + def __init__(self, n_clusters, n_init, max_iter, data): + self.n_clusters = n_clusters + self.n_init = n_init + self.max_iter = max_iter + self.data = data + self.labels = np.array([]) + self.centers = [] + + def run(self): + kmeans = KMeans(n_clusters=self.n_clusters, init="random", n_init=self.n_init, max_iter=self.max_iter, random_state=111) + self.labels = kmeans.fit_predict(self.data) + self.centers = kmeans.cluster_centers_ + return self.labels + + + def get_stats(self): + unique_labels = np.unique(self.labels) + stats = [] + + for label in unique_labels: + cluster_points = self.data[self.labels == label] + num_points = len(cluster_points) + center = self.centers[label] + stats.append({ + 'cluster': label, + 'num_points': num_points, + 'center': center + }) + return stats diff --git a/frontend/pages/clustering_dbscan.py b/frontend/pages/clustering_dbscan.py index d06b10a..7ca16f6 100644 --- a/frontend/pages/clustering_dbscan.py +++ b/frontend/pages/clustering_dbscan.py @@ -1,10 +1,9 @@ import streamlit as st import matplotlib.pyplot as plt -from sklearn.cluster import DBSCAN +from clusters import DBSCAN_cluster st.header("Clustering: dbscan") - if "data" in st.session_state: data = st.session_state.data @@ -17,8 +16,9 @@ if "data" in st.session_state: if len(data_name) >= 2 and len(data_name) <=3: x = data[data_name].to_numpy() - dbscan = DBSCAN(eps=eps, min_samples=min_samples) - y_dbscan = dbscan.fit_predict(x) + dbscan = DBSCAN_cluster(eps,min_samples,x) + y_dbscan = dbscan.run() + st.table(dbscan.get_stats()) fig = plt.figure() if len(data_name) == 2: @@ -28,8 +28,5 @@ if "data" in st.session_state: ax = fig.add_subplot(projection='3d') ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_dbscan, s=50, cmap="viridis") st.pyplot(fig) - - - else: st.error("file not loaded") \ No newline at end of file diff --git a/frontend/pages/clustering_kmeans.py b/frontend/pages/clustering_kmeans.py index c61bf40..63c7d55 100644 --- a/frontend/pages/clustering_kmeans.py +++ b/frontend/pages/clustering_kmeans.py @@ -1,10 +1,9 @@ import streamlit as st -from sklearn.cluster import KMeans import matplotlib.pyplot as plt 
+from clusters import KMeans_cluster st.header("Clustering: kmeans") - if "data" in st.session_state: data = st.session_state.data @@ -23,21 +22,22 @@ if "data" in st.session_state: if len(data_name) >= 2 and len(data_name) <=3: x = data[data_name].to_numpy() - kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111) - y_kmeans = kmeans.fit_predict(x) + kmeans = KMeans_cluster(n_clusters, n_init, max_iter, x) + y_kmeans = kmeans.run() + st.table(kmeans.get_stats()) + + centers = kmeans.centers fig = plt.figure() if len(data_name) == 2: ax = fig.add_subplot(projection='rectilinear') plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis") - centers = kmeans.cluster_centers_ plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X") else: ax = fig.add_subplot(projection='3d') ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_kmeans, s=50, cmap="viridis") - centers = kmeans.cluster_centers_ - ax.scatter(centers[:, 0], centers[:, 1],centers[:, 2], c="black", s=200, marker="X") + ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], c="black", s=200, marker="X") st.pyplot(fig) else: -- 2.43.0 From 9da6e2d594688346d398f9fb8bf2e6ee5684050b Mon Sep 17 00:00:00 2001 From: Bastien OLLIER Date: Tue, 25 Jun 2024 08:37:38 +0200 Subject: [PATCH 02/10] Add cluster stats (#13) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: bastien ollier Reviewed-on: https://codefirst.iut.uca.fr/git/clement.freville2/miner/pulls/13 Reviewed-by: Hugo PRADIER Reviewed-by: Clément FRÉVILLE Co-authored-by: Bastien OLLIER Co-committed-by: Bastien OLLIER --- frontend/clusters.py | 63 +++++++++++++++++++++++++++++ frontend/pages/clustering_dbscan.py | 11 ++--- frontend/pages/clustering_kmeans.py | 14 +++---- 3 files changed, 74 insertions(+), 14 deletions(-) create mode 100644 frontend/clusters.py diff --git a/frontend/clusters.py b/frontend/clusters.py new file mode 100644 index 0000000..ac2af4c --- /dev/null +++ b/frontend/clusters.py @@ -0,0 +1,63 @@ +from sklearn.cluster import DBSCAN, KMeans +import numpy as np + +class DBSCAN_cluster(): + def __init__(self, eps, min_samples,data): + self.eps = eps + self.min_samples = min_samples + self.data = data + self.labels = np.array([]) + + def run(self): + dbscan = DBSCAN(eps=self.eps, min_samples=self.min_samples) + self.labels = dbscan.fit_predict(self.data) + return self.labels + + def get_stats(self): + unique_labels = np.unique(self.labels) + stats = [] + for label in unique_labels: + if label == -1: + continue + cluster_points = self.data[self.labels == label] + num_points = len(cluster_points) + density = num_points / (np.max(cluster_points, axis=0) - np.min(cluster_points, axis=0)).prod() + stats.append({ + "cluster": label, + "num_points": num_points, + "density": density + }) + + return stats + + +class KMeans_cluster(): + def __init__(self, n_clusters, n_init, max_iter, data): + self.n_clusters = n_clusters + self.n_init = n_init + self.max_iter = max_iter + self.data = data + self.labels = np.array([]) + self.centers = [] + + def run(self): + kmeans = KMeans(n_clusters=self.n_clusters, init="random", n_init=self.n_init, max_iter=self.max_iter, random_state=111) + self.labels = kmeans.fit_predict(self.data) + self.centers = kmeans.cluster_centers_ + return self.labels + + + def get_stats(self): + unique_labels = np.unique(self.labels) + stats = [] + + for label in unique_labels: + cluster_points = self.data[self.labels == label] + num_points = 
len(cluster_points) + center = self.centers[label] + stats.append({ + 'cluster': label, + 'num_points': num_points, + 'center': center + }) + return stats diff --git a/frontend/pages/clustering_dbscan.py b/frontend/pages/clustering_dbscan.py index d06b10a..7ca16f6 100644 --- a/frontend/pages/clustering_dbscan.py +++ b/frontend/pages/clustering_dbscan.py @@ -1,10 +1,9 @@ import streamlit as st import matplotlib.pyplot as plt -from sklearn.cluster import DBSCAN +from clusters import DBSCAN_cluster st.header("Clustering: dbscan") - if "data" in st.session_state: data = st.session_state.data @@ -17,8 +16,9 @@ if "data" in st.session_state: if len(data_name) >= 2 and len(data_name) <=3: x = data[data_name].to_numpy() - dbscan = DBSCAN(eps=eps, min_samples=min_samples) - y_dbscan = dbscan.fit_predict(x) + dbscan = DBSCAN_cluster(eps,min_samples,x) + y_dbscan = dbscan.run() + st.table(dbscan.get_stats()) fig = plt.figure() if len(data_name) == 2: @@ -28,8 +28,5 @@ if "data" in st.session_state: ax = fig.add_subplot(projection='3d') ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_dbscan, s=50, cmap="viridis") st.pyplot(fig) - - - else: st.error("file not loaded") \ No newline at end of file diff --git a/frontend/pages/clustering_kmeans.py b/frontend/pages/clustering_kmeans.py index c61bf40..63c7d55 100644 --- a/frontend/pages/clustering_kmeans.py +++ b/frontend/pages/clustering_kmeans.py @@ -1,10 +1,9 @@ import streamlit as st -from sklearn.cluster import KMeans import matplotlib.pyplot as plt +from clusters import KMeans_cluster st.header("Clustering: kmeans") - if "data" in st.session_state: data = st.session_state.data @@ -23,21 +22,22 @@ if "data" in st.session_state: if len(data_name) >= 2 and len(data_name) <=3: x = data[data_name].to_numpy() - kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111) - y_kmeans = kmeans.fit_predict(x) + kmeans = KMeans_cluster(n_clusters, n_init, max_iter, x) + y_kmeans = kmeans.run() + st.table(kmeans.get_stats()) + + centers = kmeans.centers fig = plt.figure() if len(data_name) == 2: ax = fig.add_subplot(projection='rectilinear') plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis") - centers = kmeans.cluster_centers_ plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X") else: ax = fig.add_subplot(projection='3d') ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_kmeans, s=50, cmap="viridis") - centers = kmeans.cluster_centers_ - ax.scatter(centers[:, 0], centers[:, 1],centers[:, 2], c="black", s=200, marker="X") + ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], c="black", s=200, marker="X") st.pyplot(fig) else: -- 2.43.0 From 01168f3588d9449ad92d36333635334642b8cef8 Mon Sep 17 00:00:00 2001 From: bastien Date: Tue, 25 Jun 2024 18:06:30 +0200 Subject: [PATCH 03/10] add visu to prediction regression --- frontend/pages/prediction_regression.py | 31 +++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index 377274e..42acf34 100644 --- a/frontend/pages/prediction_regression.py +++ b/frontend/pages/prediction_regression.py @@ -1,6 +1,8 @@ import streamlit as st from sklearn.linear_model import LinearRegression import pandas as pd +import matplotlib.pyplot as plt +import numpy as np st.header("Prediction: Regression") @@ -25,5 +27,34 @@ if "data" in st.session_state: prediction = model.predict(pd.DataFrame([pred_values], columns=data_name)) st.write("Prediction:", prediction[0]) + + fig = 
plt.figure() + dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name) + + if len(data_name) == 1: + X = dataframe_sorted[data_name[0]] + y = dataframe_sorted[target_name] + + prediction_array_y = [ + model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i]]], columns=data_name))[0] + for i in range(dataframe_sorted.shape[0]) + ] + + plt.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[target_name], color='b') + plt.scatter(dataframe_sorted[data_name[0]], prediction_array_y, color='r') + else: + ax = fig.add_subplot(111, projection='3d') + + prediction_array_y = [ + model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i], dataframe_sorted[data_name[1]].iloc[i]]], columns=data_name))[0] + for i in range(dataframe_sorted.shape[0]) + ] + + ax.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], dataframe_sorted[target_name], color='b') + ax.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], prediction_array_y, color='r') + + st.pyplot(fig) + + else: st.error("File not loaded") -- 2.43.0 From 405439564147fc6430b9a933e156fe1e480372da Mon Sep 17 00:00:00 2001 From: bastien Date: Tue, 25 Jun 2024 19:54:35 +0200 Subject: [PATCH 04/10] update --- frontend/pages/prediction_classification.py | 23 +++++++++++++++++++++ frontend/pages/prediction_regression.py | 7 +++---- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py index 5aaf52f..20ae5e1 100644 --- a/frontend/pages/prediction_classification.py +++ b/frontend/pages/prediction_classification.py @@ -4,6 +4,8 @@ from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder import pandas as pd +import matplotlib.pyplot as plt + st.header("Prediction: Classification") @@ -60,5 +62,26 @@ if "data" in st.session_state: prediction = label_encoders[target_name].inverse_transform(prediction) st.write("Prediction:", prediction[0]) + + + + + fig = plt.figure() + dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name) + + X = dataframe_sorted[data_name[0]] + y = dataframe_sorted[target_name] + + prediction_array_y = [ + model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i]]], columns=data_name))[0] + for i in range(dataframe_sorted.shape[0]) + ] + + plt.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[target_name], color='b') + plt.scatter(dataframe_sorted[data_name[0]], prediction_array_y, color='r') + + st.pyplot(fig) + + else: st.error("File not loaded") diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index 42acf34..6d125e0 100644 --- a/frontend/pages/prediction_regression.py +++ b/frontend/pages/prediction_regression.py @@ -41,8 +41,8 @@ if "data" in st.session_state: ] plt.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[target_name], color='b') - plt.scatter(dataframe_sorted[data_name[0]], prediction_array_y, color='r') - else: + plt.plot(dataframe_sorted[data_name[0]], prediction_array_y, color='r') + elif len(data_name) == 2: ax = fig.add_subplot(111, projection='3d') prediction_array_y = [ @@ -51,10 +51,9 @@ if "data" in st.session_state: ] ax.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], dataframe_sorted[target_name], color='b') - ax.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], prediction_array_y, color='r') + 
ax.plot(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], prediction_array_y, color='r') st.pyplot(fig) - else: st.error("File not loaded") -- 2.43.0 From 27e69b2af8b4dfd1adb7c5441f8e0a713d1192d4 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 26 Jun 2024 10:45:50 +0200 Subject: [PATCH 05/10] add confusion_matrix --- frontend/pages/prediction_classification.py | 26 +++++++++------------ frontend/pages/prediction_regression.py | 2 +- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py index 20ae5e1..c11d7ee 100644 --- a/frontend/pages/prediction_classification.py +++ b/frontend/pages/prediction_classification.py @@ -1,11 +1,11 @@ import streamlit as st from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split -from sklearn.metrics import accuracy_score +from sklearn.metrics import accuracy_score,confusion_matrix from sklearn.preprocessing import LabelEncoder import pandas as pd import matplotlib.pyplot as plt - +import seaborn as sns st.header("Prediction: Classification") @@ -63,24 +63,20 @@ if "data" in st.session_state: st.write("Prediction:", prediction[0]) + if len(data_name) == 1: + fig = plt.figure() + y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()] + print([x[0] for x in X.values.tolist()]) + cm = confusion_matrix(y, y_pred) + sns.heatmap(cm, annot=True, fmt="d") - fig = plt.figure() - dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name) + plt.xlabel('Predicted') + plt.ylabel('True') - X = dataframe_sorted[data_name[0]] - y = dataframe_sorted[target_name] + st.pyplot(fig) - prediction_array_y = [ - model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i]]], columns=data_name))[0] - for i in range(dataframe_sorted.shape[0]) - ] - - plt.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[target_name], color='b') - plt.scatter(dataframe_sorted[data_name[0]], prediction_array_y, color='r') - - st.pyplot(fig) else: diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index 6d125e0..e06fa12 100644 --- a/frontend/pages/prediction_regression.py +++ b/frontend/pages/prediction_regression.py @@ -2,7 +2,6 @@ import streamlit as st from sklearn.linear_model import LinearRegression import pandas as pd import matplotlib.pyplot as plt -import numpy as np st.header("Prediction: Regression") @@ -31,6 +30,7 @@ if "data" in st.session_state: fig = plt.figure() dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name) + if len(data_name) == 1: X = dataframe_sorted[data_name[0]] y = dataframe_sorted[target_name] -- 2.43.0 From da1e97f07f3dca68e0762700ba4a1b79f023dad3 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 26 Jun 2024 10:59:25 +0200 Subject: [PATCH 06/10] add r2 score --- frontend/pages/prediction_classification.py | 6 +----- frontend/pages/prediction_regression.py | 5 +++++ 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py index c11d7ee..bb6bb22 100644 --- a/frontend/pages/prediction_classification.py +++ b/frontend/pages/prediction_classification.py @@ -67,7 +67,6 @@ if "data" in st.session_state: fig = plt.figure() y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()] - print([x[0] for x in X.values.tolist()]) 
cm = confusion_matrix(y, y_pred) sns.heatmap(cm, annot=True, fmt="d") @@ -75,9 +74,6 @@ if "data" in st.session_state: plt.xlabel('Predicted') plt.ylabel('True') - st.pyplot(fig) - - - + st.pyplot(fig, figsize=(1, 1)) else: st.error("File not loaded") diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index e06fa12..35b648d 100644 --- a/frontend/pages/prediction_regression.py +++ b/frontend/pages/prediction_regression.py @@ -1,5 +1,6 @@ import streamlit as st from sklearn.linear_model import LinearRegression +from sklearn.metrics import r2_score import pandas as pd import matplotlib.pyplot as plt @@ -21,6 +22,10 @@ if "data" in st.session_state: model = LinearRegression() model.fit(X, y) + y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()] + r2 = r2_score(y, y_pred) + st.write('R-squared score:', r2) + st.subheader("Enter values for prediction") pred_values = [st.number_input(f"Value for {feature}", value=0.0) for feature in data_name] prediction = model.predict(pd.DataFrame([pred_values], columns=data_name)) -- 2.43.0 From 9bc9e21e45412fca2f2ffce6230cb61573ad928c Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 26 Jun 2024 11:05:04 +0200 Subject: [PATCH 07/10] add r2 score --- frontend/pages/prediction_regression.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index 35b648d..a290c10 100644 --- a/frontend/pages/prediction_regression.py +++ b/frontend/pages/prediction_regression.py @@ -22,10 +22,6 @@ if "data" in st.session_state: model = LinearRegression() model.fit(X, y) - y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()] - r2 = r2_score(y, y_pred) - st.write('R-squared score:', r2) - st.subheader("Enter values for prediction") pred_values = [st.number_input(f"Value for {feature}", value=0.0) for feature in data_name] prediction = model.predict(pd.DataFrame([pred_values], columns=data_name)) @@ -35,8 +31,11 @@ if "data" in st.session_state: fig = plt.figure() dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name) - if len(data_name) == 1: + y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()] + r2 = r2_score(y, y_pred) + st.write('R-squared score:', r2) + X = dataframe_sorted[data_name[0]] y = dataframe_sorted[target_name] -- 2.43.0 From 01ef19a2f80888d559a8fff910039bc0b057c2bc Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Wed, 26 Jun 2024 12:00:21 +0200 Subject: [PATCH 08/10] Merge files using strategies --- frontend/clusters.py | 78 ++++++++++++++++++----------- frontend/pages/clustering.py | 48 ++++++++++++++++++ frontend/pages/clustering_dbscan.py | 32 ------------ frontend/pages/clustering_kmeans.py | 44 ---------------- 4 files changed, 97 insertions(+), 105 deletions(-) create mode 100644 frontend/pages/clustering.py delete mode 100644 frontend/pages/clustering_dbscan.py delete mode 100644 frontend/pages/clustering_kmeans.py diff --git a/frontend/clusters.py b/frontend/clusters.py index ac2af4c..20e1927 100644 --- a/frontend/clusters.py +++ b/frontend/clusters.py @@ -1,25 +1,40 @@ from sklearn.cluster import DBSCAN, KMeans import numpy as np +from dataclasses import dataclass +from abc import ABC, abstractmethod +from typing import Any, Optional -class DBSCAN_cluster(): - def __init__(self, eps, min_samples,data): +@dataclass 
+class ClusterResult: + labels: np.array + centers: Optional[np.array] + statistics: list[dict[str, Any]] + + +class Cluster(ABC): + @abstractmethod + def run(self, data: np.array) -> ClusterResult: + pass + + +class DBSCANCluster(Cluster): + def __init__(self, eps: float = 0.5, min_samples: int = 5): self.eps = eps self.min_samples = min_samples - self.data = data - self.labels = np.array([]) - def run(self): + #@typing.override + def run(self, data: np.array) -> ClusterResult: dbscan = DBSCAN(eps=self.eps, min_samples=self.min_samples) - self.labels = dbscan.fit_predict(self.data) - return self.labels + labels = dbscan.fit_predict(data) + return ClusterResult(labels, None, self.get_statistics(data, labels)) - def get_stats(self): - unique_labels = np.unique(self.labels) + def get_statistics(self, data: np.array, labels: np.array) -> list[dict[str, Any]]: + unique_labels = np.unique(labels) stats = [] for label in unique_labels: if label == -1: continue - cluster_points = self.data[self.labels == label] + cluster_points = data[labels == label] num_points = len(cluster_points) density = num_points / (np.max(cluster_points, axis=0) - np.min(cluster_points, axis=0)).prod() stats.append({ @@ -27,37 +42,42 @@ class DBSCAN_cluster(): "num_points": num_points, "density": density }) - return stats - -class KMeans_cluster(): - def __init__(self, n_clusters, n_init, max_iter, data): + def __str__(self) -> str: + return "DBScan" + + +class KMeansCluster(Cluster): + def __init__(self, n_clusters: int = 8, n_init: int = 1, max_iter: int = 300): self.n_clusters = n_clusters self.n_init = n_init self.max_iter = max_iter - self.data = data - self.labels = np.array([]) - self.centers = [] - def run(self): + #@typing.override + def run(self, data: np.array) -> ClusterResult: kmeans = KMeans(n_clusters=self.n_clusters, init="random", n_init=self.n_init, max_iter=self.max_iter, random_state=111) - self.labels = kmeans.fit_predict(self.data) - self.centers = kmeans.cluster_centers_ - return self.labels + labels = kmeans.fit_predict(data) + centers = kmeans.cluster_centers_ + return ClusterResult(labels, centers, self.get_statistics(data, labels, centers)) - - def get_stats(self): - unique_labels = np.unique(self.labels) + def get_statistics(self, data: np.array, labels: np.array, centers: np.array) -> list[dict[str, Any]]: + unique_labels = np.unique(labels) stats = [] for label in unique_labels: - cluster_points = self.data[self.labels == label] + cluster_points = data[labels == label] num_points = len(cluster_points) - center = self.centers[label] + center = centers[label] stats.append({ - 'cluster': label, - 'num_points': num_points, - 'center': center + "cluster": label, + "num_points": num_points, + "center": center, }) return stats + + def __str__(self) -> str: + return "KMeans" + + +CLUSTERING_STRATEGIES = [DBSCANCluster(), KMeansCluster()] diff --git a/frontend/pages/clustering.py b/frontend/pages/clustering.py new file mode 100644 index 0000000..b3bf971 --- /dev/null +++ b/frontend/pages/clustering.py @@ -0,0 +1,48 @@ +import streamlit as st +import matplotlib.pyplot as plt +from clusters import DBSCANCluster, KMeansCluster, CLUSTERING_STRATEGIES + +st.header("Clustering") + +if "data" in st.session_state: + data = st.session_state.data + + general_row = st.columns([1, 1]) + clustering = general_row[0].selectbox("Clustering method", CLUSTERING_STRATEGIES) + data_name = general_row[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=3) + + with 
st.form("cluster_form"): + if isinstance(clustering, KMeansCluster): + row1 = st.columns([1, 1, 1]) + clustering.n_clusters = row1[0].number_input("Number of clusters", min_value=1, max_value=data.shape[0], value=clustering.n_clusters) + clustering.n_init = row1[1].number_input("n_init", min_value=1, value=clustering.n_init) + clustering.max_iter = row1[2].number_input("max_iter", min_value=1, value=clustering.max_iter) + elif isinstance(clustering, DBSCANCluster): + clustering.eps = st.slider("eps", min_value=0.0001, max_value=1.0, step=0.1, value=clustering.eps) + clustering.min_samples = st.number_input("min_samples", min_value=1, value=clustering.min_samples) + + st.form_submit_button("Launch") + + if len(data_name) >= 2 and len(data_name) <=3: + x = data[data_name].to_numpy() + + result = clustering.run(x) + + st.table(result.statistics) + + fig = plt.figure() + if len(data_name) == 2: + ax = fig.add_subplot(projection='rectilinear') + plt.scatter(x[:, 0], x[:, 1], c=result.labels, s=50, cmap="viridis") + if result.centers is not None: + plt.scatter(result.centers[:, 0], result.centers[:, 1], c="black", s=200, marker="X") + else: + ax = fig.add_subplot(projection='3d') + + ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=result.labels, s=50, cmap="viridis") + if result.centers is not None: + ax.scatter(result.centers[:, 0], result.centers[:, 1], result.centers[:, 2], c="black", s=200, marker="X") + st.pyplot(fig) + +else: + st.error("file not loaded") diff --git a/frontend/pages/clustering_dbscan.py b/frontend/pages/clustering_dbscan.py deleted file mode 100644 index 7ca16f6..0000000 --- a/frontend/pages/clustering_dbscan.py +++ /dev/null @@ -1,32 +0,0 @@ -import streamlit as st -import matplotlib.pyplot as plt -from clusters import DBSCAN_cluster - -st.header("Clustering: dbscan") - -if "data" in st.session_state: - data = st.session_state.data - - with st.form("my_form"): - data_name = st.multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3) - eps = st.slider("eps", min_value=0.0, max_value=1.0, value=0.5, step=0.01) - min_samples = st.number_input("min_samples", step=1, min_value=1, value=5) - st.form_submit_button("launch") - - if len(data_name) >= 2 and len(data_name) <=3: - x = data[data_name].to_numpy() - - dbscan = DBSCAN_cluster(eps,min_samples,x) - y_dbscan = dbscan.run() - st.table(dbscan.get_stats()) - - fig = plt.figure() - if len(data_name) == 2: - ax = fig.add_subplot(projection='rectilinear') - plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis") - else: - ax = fig.add_subplot(projection='3d') - ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_dbscan, s=50, cmap="viridis") - st.pyplot(fig) -else: - st.error("file not loaded") \ No newline at end of file diff --git a/frontend/pages/clustering_kmeans.py b/frontend/pages/clustering_kmeans.py deleted file mode 100644 index 63c7d55..0000000 --- a/frontend/pages/clustering_kmeans.py +++ /dev/null @@ -1,44 +0,0 @@ -import streamlit as st -import matplotlib.pyplot as plt -from clusters import KMeans_cluster - -st.header("Clustering: kmeans") - -if "data" in st.session_state: - data = st.session_state.data - - with st.form("my_form"): - row1 = st.columns([1,1,1]) - n_clusters = row1[0].selectbox("Number of clusters", range(1,data.shape[0])) - data_name = row1[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=3) - n_init = row1[2].number_input("n_init",step=1,min_value=1) - - row2 = st.columns([1,1]) - max_iter = row1[0].number_input("max_iter",step=1,min_value=1) 
- - - st.form_submit_button("launch") - - if len(data_name) >= 2 and len(data_name) <=3: - x = data[data_name].to_numpy() - - kmeans = KMeans_cluster(n_clusters, n_init, max_iter, x) - y_kmeans = kmeans.run() - - st.table(kmeans.get_stats()) - - centers = kmeans.centers - fig = plt.figure() - if len(data_name) == 2: - ax = fig.add_subplot(projection='rectilinear') - plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis") - plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X") - else: - ax = fig.add_subplot(projection='3d') - - ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_kmeans, s=50, cmap="viridis") - ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], c="black", s=200, marker="X") - st.pyplot(fig) - -else: - st.error("file not loaded") -- 2.43.0 From 7cb0d559699de6c6b4285a9af8ad23a674dbec37 Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Wed, 26 Jun 2024 20:45:55 +0200 Subject: [PATCH 09/10] Allow using PCA to reduce dataset dimensions --- frontend/pages/clustering.py | 52 +++++++++++++++++++++++++++++++----- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/frontend/pages/clustering.py b/frontend/pages/clustering.py index b3bf971..2c2fb8e 100644 --- a/frontend/pages/clustering.py +++ b/frontend/pages/clustering.py @@ -1,15 +1,19 @@ import streamlit as st import matplotlib.pyplot as plt from clusters import DBSCANCluster, KMeansCluster, CLUSTERING_STRATEGIES +from sklearn.decomposition import PCA +from sklearn.metrics import silhouette_score +import numpy as np st.header("Clustering") if "data" in st.session_state: data = st.session_state.data - general_row = st.columns([1, 1]) + general_row = st.columns([1, 1, 1]) clustering = general_row[0].selectbox("Clustering method", CLUSTERING_STRATEGIES) - data_name = general_row[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=3) + data_name = general_row[1].multiselect("Columns", data.select_dtypes(include="number").columns) + n_components = general_row[2].number_input("Reduce dimensions to (PCA)", min_value=1, max_value=3, value=2) with st.form("cluster_form"): if isinstance(clustering, KMeansCluster): @@ -18,20 +22,50 @@ if "data" in st.session_state: clustering.n_init = row1[1].number_input("n_init", min_value=1, value=clustering.n_init) clustering.max_iter = row1[2].number_input("max_iter", min_value=1, value=clustering.max_iter) elif isinstance(clustering, DBSCANCluster): - clustering.eps = st.slider("eps", min_value=0.0001, max_value=1.0, step=0.1, value=clustering.eps) - clustering.min_samples = st.number_input("min_samples", min_value=1, value=clustering.min_samples) + row1 = st.columns([1, 1]) + clustering.eps = row1[0].slider("eps", min_value=0.0001, max_value=1.0, step=0.05, value=clustering.eps) + clustering.min_samples = row1[1].number_input("min_samples", min_value=1, value=clustering.min_samples) st.form_submit_button("Launch") - if len(data_name) >= 2 and len(data_name) <=3: + if len(data_name) > 0: x = data[data_name].to_numpy() + n_components = min(n_components, len(data_name)) + if len(data_name) > n_components: + pca = PCA(n_components) + x = pca.fit_transform(x) + if n_components == 2: + (fig, ax) = plt.subplots(figsize=(8, 8)) + for i in range(0, pca.components_.shape[1]): + ax.arrow( + 0, + 0, + pca.components_[0, i], + pca.components_[1, i], + head_width=0.1, + head_length=0.1 + ) + + plt.text( + pca.components_[0, i] + 0.05, + pca.components_[1, i] + 0.05, + data_name[i] + ) + circle = plt.Circle((0, 0), radius=1, edgecolor='b', 
facecolor='None') + ax.add_patch(circle) + plt.axis("equal") + ax.set_title("PCA result - Correlation circle") + st.pyplot(fig) result = clustering.run(x) - + st.write("## Cluster stats") st.table(result.statistics) + st.write("## Graphical representation") fig = plt.figure() - if len(data_name) == 2: + if n_components == 1: + plt.scatter(x, np.zeros_like(x)) + elif n_components == 2: ax = fig.add_subplot(projection='rectilinear') plt.scatter(x[:, 0], x[:, 1], c=result.labels, s=50, cmap="viridis") if result.centers is not None: @@ -43,6 +77,10 @@ if "data" in st.session_state: if result.centers is not None: ax.scatter(result.centers[:, 0], result.centers[:, 1], result.centers[:, 2], c="black", s=200, marker="X") st.pyplot(fig) + if not (result.labels == 0).all(): + st.write("Silhouette score:", silhouette_score(x, result.labels)) + else: + st.error("Select at least one column") else: st.error("file not loaded") -- 2.43.0 From f464f6166ae0cd045df8b7a064af26fcd5e66f74 Mon Sep 17 00:00:00 2001 From: gorky1234 Date: Wed, 26 Jun 2024 21:31:01 +0200 Subject: [PATCH 10/10] corection bug figsize --- frontend/pages/prediction_classification.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py index bb6bb22..648db06 100644 --- a/frontend/pages/prediction_classification.py +++ b/frontend/pages/prediction_classification.py @@ -74,6 +74,6 @@ if "data" in st.session_state: plt.xlabel('Predicted') plt.ylabel('True') - st.pyplot(fig, figsize=(1, 1)) + st.pyplot(fig) else: st.error("File not loaded") -- 2.43.0
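
For reference, the strategy API that PATCH 08 introduces in frontend/clusters.py (Cluster, ClusterResult, CLUSTERING_STRATEGIES) can also be exercised outside Streamlit. The following is a minimal sketch, not part of the patch series: it assumes frontend/ is importable (e.g. on PYTHONPATH) and that numpy and scikit-learn are installed; the toy dataset and the n_clusters tweak are illustrative only.

# sketch.py -- illustrative use of the clustering strategies from frontend/clusters.py
import numpy as np
from clusters import KMeansCluster, CLUSTERING_STRATEGIES

rng = np.random.default_rng(0)
data = rng.normal(size=(100, 2))            # toy 2-D data standing in for the selected columns

for strategy in CLUSTERING_STRATEGIES:      # [DBSCANCluster(), KMeansCluster()]
    if isinstance(strategy, KMeansCluster):
        strategy.n_clusters = 3             # hyper-parameters are plain attributes, set before run()
    result = strategy.run(data)             # ClusterResult(labels, centers, statistics)
    print(strategy, "->", len(result.statistics), "clusters")
    if result.centers is not None:          # only KMeans reports centers
        print(result.centers)

This mirrors what frontend/pages/clustering.py does: it mutates the chosen strategy's attributes from the form widgets, calls run() on the numeric columns, and renders result.statistics and result.centers.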