From fd3d6e3b0163c4848a6339c130b97f1781337b89 Mon Sep 17 00:00:00 2001
From: bastien ollier
Date: Wed, 5 Jun 2024 09:51:48 +0200
Subject: [PATCH 01/38] debut merge

---
 frontend/{main.py => exploration.py} | 39 ++++++++++------------------
 frontend/pages/visualization.py      | 30 +++++++++++++++++++++
 2 files changed, 44 insertions(+), 25 deletions(-)
 rename frontend/{main.py => exploration.py} (52%)
 create mode 100644 frontend/pages/visualization.py

diff --git a/frontend/main.py b/frontend/exploration.py
similarity index 52%
rename from frontend/main.py
rename to frontend/exploration.py
index 79b9a64..b00a56a 100644
--- a/frontend/main.py
+++ b/frontend/exploration.py
@@ -9,13 +9,24 @@ st.set_page_config(
     layout="wide"
 )
 
+st.title("Home")
+
 ### Exploration
 uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"])
 
-if uploaded_file:
-    data = pd.read_csv(uploaded_file)
+if uploaded_file is not None:
+    st.session_state.data = pd.read_csv(uploaded_file)
     st.success("File loaded successfully!")
 
+if "data" not in st.session_state:
+    st.session_state.data = None
+
+
+if st.session_state.data is not None:
+    data = st.session_state.data
+    st.write(data.head(10))
+    st.write(data.tail(10))
+
     st.header("Data Preview")
 
     st.subheader("First 5 Rows")
@@ -39,26 +50,4 @@ if uploaded_file:
 
     st.subheader("Statistical Summary")
     st.write(data.describe())
-
-    ### Visualization
-
-    st.header("Data Visualization")
-
-    st.subheader("Histogram")
-    column_to_plot = st.selectbox("Select Column for Histogram", data.columns)
-    if column_to_plot:
-        fig, ax = plt.subplots()
-        ax.hist(data[column_to_plot].dropna(), bins=20, edgecolor='k')
-        ax.set_title(f'Histogram of {column_to_plot}')
-        ax.set_xlabel(column_to_plot)
-        ax.set_ylabel('Frequency')
-        st.pyplot(fig)
-
-    st.subheader("Boxplot")
-    dataNumeric = data.select_dtypes(include='number')
-    column_to_plot = st.selectbox("Select Column for Boxplot", dataNumeric.columns)
-    if column_to_plot:
-        fig, ax = plt.subplots()
-        sns.boxplot(data=data, x=column_to_plot, ax=ax)
-        ax.set_title(f'Boxplot of {column_to_plot}')
-        st.pyplot(fig)
\ No newline at end of file
+    
\ No newline at end of file
diff --git a/frontend/pages/visualization.py b/frontend/pages/visualization.py
new file mode 100644
index 0000000..057b0c9
--- /dev/null
+++ b/frontend/pages/visualization.py
@@ -0,0 +1,30 @@
+import streamlit as st
+import matplotlib.pyplot as plt
+import seaborn as sns
+
+st.header("Data Visualization")
+
+
+if "data" in st.session_state:
+    data = st.session_state.data
+
+    st.subheader("Histogram")
+    column_to_plot = st.selectbox("Select Column for Histogram", data.columns)
+    if column_to_plot:
+        fig, ax = plt.subplots()
+        ax.hist(data[column_to_plot].dropna(), bins=20, edgecolor='k')
+        ax.set_title(f"Histogram of {column_to_plot}")
+        ax.set_xlabel(column_to_plot)
+        ax.set_ylabel("Frequency")
+        st.pyplot(fig)
+
+    st.subheader("Boxplot")
+    dataNumeric = data.select_dtypes(include="number")
+    column_to_plot = st.selectbox("Select Column for Boxplot", dataNumeric.columns)
+    if column_to_plot:
+        fig, ax = plt.subplots()
+        sns.boxplot(data=data, x=column_to_plot, ax=ax)
+        ax.set_title(f"Boxplot of {column_to_plot}")
+        st.pyplot(fig)
+else:
+    st.error("file not loaded")
\ No newline at end of file
-- 
2.43.0

From c190656165a18932ae126bbab47385fdfac8aa89 Mon Sep 17 00:00:00 2001
From: bastien ollier
Date: Fri, 7 Jun 2024 10:22:47 +0200
Subject: [PATCH 02/38] fix file not load

---
 frontend/exploration.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git
a/frontend/exploration.py b/frontend/exploration.py index b00a56a..2becb9b 100644 --- a/frontend/exploration.py +++ b/frontend/exploration.py @@ -18,11 +18,8 @@ if uploaded_file is not None: st.session_state.data = pd.read_csv(uploaded_file) st.success("File loaded successfully!") -if "data" not in st.session_state: - st.session_state.data = None - -if st.session_state.data is not None: +if "data" in st.session_state: data = st.session_state.data st.write(data.head(10)) st.write(data.tail(10)) -- 2.43.0 From 6644d60fa271741caa9e91eecb654ba120b4127c Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Fri, 7 Jun 2024 10:24:02 +0200 Subject: [PATCH 03/38] Remove unused imports --- frontend/exploration.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/frontend/exploration.py b/frontend/exploration.py index 2becb9b..123a22b 100644 --- a/frontend/exploration.py +++ b/frontend/exploration.py @@ -1,8 +1,5 @@ import pandas as pd import streamlit as st -import matplotlib.pyplot as plt -import seaborn as sns -from pandas.api.types import is_numeric_dtype st.set_page_config( page_title="Project Miner", @@ -47,4 +44,4 @@ if "data" in st.session_state: st.subheader("Statistical Summary") st.write(data.describe()) - \ No newline at end of file + -- 2.43.0 From ba1aef5727d106d95daea6826511d61e4284499f Mon Sep 17 00:00:00 2001 From: Bastien OLLIER Date: Fri, 7 Jun 2024 10:25:37 +0200 Subject: [PATCH 04/38] Add navigation (#2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: bastien ollier Co-authored-by: clfreville2 Reviewed-on: https://codefirst.iut.uca.fr/git/clement.freville2/miner/pulls/2 Reviewed-by: Clément FRÉVILLE Co-authored-by: Bastien OLLIER Co-committed-by: Bastien OLLIER --- frontend/exploration.py | 47 ++++++++++++++++++++++++ frontend/main.py | 64 --------------------------------- frontend/pages/visualization.py | 30 ++++++++++++++++ 3 files changed, 77 insertions(+), 64 deletions(-) create mode 100644 frontend/exploration.py delete mode 100644 frontend/main.py create mode 100644 frontend/pages/visualization.py diff --git a/frontend/exploration.py b/frontend/exploration.py new file mode 100644 index 0000000..123a22b --- /dev/null +++ b/frontend/exploration.py @@ -0,0 +1,47 @@ +import pandas as pd +import streamlit as st + +st.set_page_config( + page_title="Project Miner", + layout="wide" +) + +st.title("Home") + +### Exploration +uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"]) + +if uploaded_file is not None: + st.session_state.data = pd.read_csv(uploaded_file) + st.success("File loaded successfully!") + + +if "data" in st.session_state: + data = st.session_state.data + st.write(data.head(10)) + st.write(data.tail(10)) + + st.header("Data Preview") + + st.subheader("First 5 Rows") + st.write(data.head()) + + st.subheader("Last 5 Rows") + st.write(data.tail()) + + st.header("Data Summary") + + st.subheader("Basic Information") + col1, col2 = st.columns(2) + col1.metric("Number of Rows", data.shape[0]) + col2.metric("Number of Columns", data.shape[1]) + + st.write(f"Column Names: {list(data.columns)}") + + st.subheader("Missing Values by Column") + missing_values = data.isnull().sum() + st.write(missing_values) + + st.subheader("Statistical Summary") + st.write(data.describe()) + diff --git a/frontend/main.py b/frontend/main.py deleted file mode 100644 index 79b9a64..0000000 --- a/frontend/main.py +++ /dev/null @@ -1,64 +0,0 @@ -import pandas as pd -import streamlit as st -import 
matplotlib.pyplot as plt -import seaborn as sns -from pandas.api.types import is_numeric_dtype - -st.set_page_config( - page_title="Project Miner", - layout="wide" -) - -### Exploration -uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"]) - -if uploaded_file: - data = pd.read_csv(uploaded_file) - st.success("File loaded successfully!") - - st.header("Data Preview") - - st.subheader("First 5 Rows") - st.write(data.head()) - - st.subheader("Last 5 Rows") - st.write(data.tail()) - - st.header("Data Summary") - - st.subheader("Basic Information") - col1, col2 = st.columns(2) - col1.metric("Number of Rows", data.shape[0]) - col2.metric("Number of Columns", data.shape[1]) - - st.write(f"Column Names: {list(data.columns)}") - - st.subheader("Missing Values by Column") - missing_values = data.isnull().sum() - st.write(missing_values) - - st.subheader("Statistical Summary") - st.write(data.describe()) - - ### Visualization - - st.header("Data Visualization") - - st.subheader("Histogram") - column_to_plot = st.selectbox("Select Column for Histogram", data.columns) - if column_to_plot: - fig, ax = plt.subplots() - ax.hist(data[column_to_plot].dropna(), bins=20, edgecolor='k') - ax.set_title(f'Histogram of {column_to_plot}') - ax.set_xlabel(column_to_plot) - ax.set_ylabel('Frequency') - st.pyplot(fig) - - st.subheader("Boxplot") - dataNumeric = data.select_dtypes(include='number') - column_to_plot = st.selectbox("Select Column for Boxplot", dataNumeric.columns) - if column_to_plot: - fig, ax = plt.subplots() - sns.boxplot(data=data, x=column_to_plot, ax=ax) - ax.set_title(f'Boxplot of {column_to_plot}') - st.pyplot(fig) \ No newline at end of file diff --git a/frontend/pages/visualization.py b/frontend/pages/visualization.py new file mode 100644 index 0000000..057b0c9 --- /dev/null +++ b/frontend/pages/visualization.py @@ -0,0 +1,30 @@ +import streamlit as st +import matplotlib.pyplot as plt +import seaborn as sns + +st.header("Data Visualization") + + +if "data" in st.session_state: + data = st.session_state.data + + st.subheader("Histogram") + column_to_plot = st.selectbox("Select Column for Histogram", data.columns) + if column_to_plot: + fig, ax = plt.subplots() + ax.hist(data[column_to_plot].dropna(), bins=20, edgecolor='k') + ax.set_title(f"Histogram of {column_to_plot}") + ax.set_xlabel(column_to_plot) + ax.set_ylabel("Frequency") + st.pyplot(fig) + + st.subheader("Boxplot") + dataNumeric = data.select_dtypes(include="number") + column_to_plot = st.selectbox("Select Column for Boxplot", dataNumeric.columns) + if column_to_plot: + fig, ax = plt.subplots() + sns.boxplot(data=data, x=column_to_plot, ax=ax) + ax.set_title(f"Boxplot of {column_to_plot}") + st.pyplot(fig) +else: + st.error("file not loaded") \ No newline at end of file -- 2.43.0 From 63bce82b3b4a222fa6ff18d2ed942455ed3caa6e Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Fri, 7 Jun 2024 10:50:44 +0200 Subject: [PATCH 05/38] Implement base MissingValues strategies --- .gitignore | 1 + frontend/exploration.py | 1 + frontend/mvstrategy.py | 70 +++++++++++++++++++++++++++++++++ frontend/pages/normalization.py | 23 +++++++++++ frontend/pages/visualization.py | 4 +- 5 files changed, 97 insertions(+), 2 deletions(-) create mode 100644 .gitignore create mode 100644 frontend/mvstrategy.py create mode 100644 frontend/pages/normalization.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..bee8a64 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +__pycache__ diff --git a/frontend/exploration.py 
b/frontend/exploration.py
index 123a22b..43d8c72 100644
--- a/frontend/exploration.py
+++ b/frontend/exploration.py
@@ -13,6 +13,7 @@ uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"])
 
 if uploaded_file is not None:
     st.session_state.data = pd.read_csv(uploaded_file)
+    st.session_state.working_data = st.session_state.data
     st.success("File loaded successfully!")
 
 
diff --git a/frontend/mvstrategy.py b/frontend/mvstrategy.py
new file mode 100644
index 0000000..81db2f8
--- /dev/null
+++ b/frontend/mvstrategy.py
@@ -0,0 +1,70 @@
+from abc import ABC, abstractmethod
+from pandas import DataFrame, Series
+from pandas.api.types import is_numeric_dtype
+from typing import Any, Union
+
+class MVStrategy(ABC):
+    """A way to handle missing values in a dataframe."""
+
+    @abstractmethod
+    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
+        """Apply the current strategy to the given series.
+
+        The series is described by its label and dataframe."""
+        return df
+
+    @staticmethod
+    def list_available(series: Series) -> list['MVStrategy']:
+        """Get all the strategies that can be used."""
+        choices = [DropStrategy(), ModeStrategy()]
+        if is_numeric_dtype(series):
+            choices.extend((MeanStrategy(), MedianStrategy()))
+        return choices
+
+
+class DropStrategy(MVStrategy):
+    #@typing.override
+    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
+        df.dropna(subset=label, inplace=True)
+        return df
+
+    def __str__(self) -> str:
+        return "Drop"
+
+
+class PositionStrategy(MVStrategy):
+    #@typing.override
+    def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
+        series.fillna(self.get_value(series), inplace=True)
+        return df
+
+    @abstractmethod
+    def get_value(self, series: Series) -> Any:
+        pass
+
+
+class MeanStrategy(PositionStrategy):
+    #@typing.override
+    def get_value(self, series: Series) -> Union[int, float]:
+        return series.mean()
+
+    def __str__(self) -> str:
+        return "Use mean"
+
+
+class MedianStrategy(PositionStrategy):
+    #@typing.override
+    def get_value(self, series: Series) -> Union[int, float]:
+        return series.median()
+
+    def __str__(self) -> str:
+        return "Use median"
+
+
+class ModeStrategy(PositionStrategy):
+    #@typing.override
+    def get_value(self, series: Series) -> Any:
+        return series.mode()[0]
+
+    def __str__(self) -> str:
+        return "Use mode"
diff --git a/frontend/pages/normalization.py b/frontend/pages/normalization.py
new file mode 100644
index 0000000..4f20c7a
--- /dev/null
+++ b/frontend/pages/normalization.py
@@ -0,0 +1,23 @@
+import streamlit as st
+from mvstrategy import MVStrategy
+
+if "data" in st.session_state:
+    data = st.session_state.data
+    st.session_state.data = data.copy()
+
+    for column, series in data.items():
+        missing_count = series.isna().sum()
+        choices = MVStrategy.list_available(series)
+        option = st.selectbox(
+            f"Missing values of {column} ({missing_count})",
+            choices,
+            index=1,
+            key=f"mv-{column}",
+        )
+        # Always re-get the series to avoid reusing an invalidated series pointer
+        data = option.apply(data, column, data[column])
+
+    st.write(data)
+    st.session_state.working_data = data
+else:
+    st.error("file not loaded")
diff --git a/frontend/pages/visualization.py b/frontend/pages/visualization.py
index 057b0c9..6ca8270 100644
--- a/frontend/pages/visualization.py
+++ b/frontend/pages/visualization.py
@@ -5,8 +5,8 @@ import seaborn as sns
 st.header("Data Visualization")
 
 
-if "data" in st.session_state:
-    data = st.session_state.data
+if "working_data" in st.session_state:
+    data =
st.session_state.working_data st.subheader("Histogram") column_to_plot = st.selectbox("Select Column for Histogram", data.columns) -- 2.43.0 From 4ae8512dcb661c184b8bf88b72242d18c89c2b4d Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Fri, 7 Jun 2024 11:29:18 +0200 Subject: [PATCH 06/38] add form --- frontend/pages/clustering.py | 48 ++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 frontend/pages/clustering.py diff --git a/frontend/pages/clustering.py b/frontend/pages/clustering.py new file mode 100644 index 0000000..037780d --- /dev/null +++ b/frontend/pages/clustering.py @@ -0,0 +1,48 @@ +import streamlit as st +from sklearn.cluster import KMeans +import matplotlib.pyplot as plt + +st.header("clustering et Prediction") + + +if "data" in st.session_state: + data = st.session_state.data + + with st.form("my_form"): + header = st.columns([2,1,2]) + header[0].subheader("Dispersion") + header[1].subheader("Number of clusters") + header[2].subheader("Data Name") + + row1 = st.columns([2,1,2]) + cluster_std = row1[0].slider("", 0.2, 3.0, 0.2, 0.2) + n_clusters = row1[1].selectbox("", range(1, 10)) + data_name = row1[2].selectbox("", data.columns) + + st.form_submit_button('launch') + + from sklearn.datasets import make_blobs + from sklearn.cluster import KMeans + import matplotlib.pyplot as plt + import streamlit as st + import random + + # Points generator + x, _ = make_blobs(n_samples=200, n_features=2, centers=5, cluster_std=cluster_std, shuffle=True, random_state=10) + + x = data[["Unit Price","Unit Cost"]].to_numpy() + + # k-means algorithm + kmeans = KMeans(n_clusters=n_clusters, init='random', n_init=10, max_iter=300, random_state=111) + y_kmeans = kmeans.fit_predict(x) + + # Plotting colored clusters + fig, ax = plt.subplots(figsize=(12,8)) + plt.scatter(x[:, 0], x[:, 1], s=100, c=kmeans.labels_, cmap='Set1') + plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=400, marker='*', color='k') + st.pyplot(fig) + +else: + st.error("file not loaded") + +# Cached function that returns a mutable object with a random number in the range 0-10 \ No newline at end of file -- 2.43.0 From 5bf5f507a595acb12bb3835b00058a0349bdc9c1 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Fri, 7 Jun 2024 11:56:38 +0200 Subject: [PATCH 07/38] end clustering --- frontend/pages/clustering.py | 45 +++++++++++++----------------------- 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/frontend/pages/clustering.py b/frontend/pages/clustering.py index 037780d..97698ec 100644 --- a/frontend/pages/clustering.py +++ b/frontend/pages/clustering.py @@ -2,47 +2,34 @@ import streamlit as st from sklearn.cluster import KMeans import matplotlib.pyplot as plt -st.header("clustering et Prediction") +st.header("Clustering") if "data" in st.session_state: data = st.session_state.data with st.form("my_form"): - header = st.columns([2,1,2]) - header[0].subheader("Dispersion") - header[1].subheader("Number of clusters") - header[2].subheader("Data Name") + row1 = st.columns([1,1,1]) + n_clusters = row1[0].selectbox("Number of clusters", range(1, 10)) + data_name = row1[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=2) + n_init = row1[2].number_input("n_init",step=1,min_value=1) + + row2 = st.columns([1,1]) + max_iter = row1[0].number_input("max_iter",step=1,min_value=1) - row1 = st.columns([2,1,2]) - cluster_std = row1[0].slider("", 0.2, 3.0, 0.2, 0.2) - n_clusters = row1[1].selectbox("", range(1, 10)) - 
data_name = row1[2].selectbox("", data.columns) st.form_submit_button('launch') - from sklearn.datasets import make_blobs - from sklearn.cluster import KMeans - import matplotlib.pyplot as plt - import streamlit as st - import random + if len(data_name) == 2: + x = data[data_name].to_numpy() - # Points generator - x, _ = make_blobs(n_samples=200, n_features=2, centers=5, cluster_std=cluster_std, shuffle=True, random_state=10) + kmeans = KMeans(n_clusters=n_clusters, init='random', n_init=n_init, max_iter=max_iter, random_state=111) + y_kmeans = kmeans.fit_predict(x) - x = data[["Unit Price","Unit Cost"]].to_numpy() - - # k-means algorithm - kmeans = KMeans(n_clusters=n_clusters, init='random', n_init=10, max_iter=300, random_state=111) - y_kmeans = kmeans.fit_predict(x) - - # Plotting colored clusters - fig, ax = plt.subplots(figsize=(12,8)) - plt.scatter(x[:, 0], x[:, 1], s=100, c=kmeans.labels_, cmap='Set1') - plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=400, marker='*', color='k') - st.pyplot(fig) + fig, ax = plt.subplots(figsize=(12,8)) + plt.scatter(x[:, 0], x[:, 1], s=100, c=kmeans.labels_, cmap='Set1') + plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=400, marker='*', color='k') + st.pyplot(fig) else: st.error("file not loaded") - -# Cached function that returns a mutable object with a random number in the range 0-10 \ No newline at end of file -- 2.43.0 From 5f960df83842e866833d863b63d08c0a0348acfd Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Fri, 7 Jun 2024 11:58:52 +0200 Subject: [PATCH 08/38] Support Pandas linear regression --- frontend/mvstrategy.py | 13 +++++++++++-- frontend/pages/normalization.py | 2 +- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/frontend/mvstrategy.py b/frontend/mvstrategy.py index 81db2f8..fb7cc6c 100644 --- a/frontend/mvstrategy.py +++ b/frontend/mvstrategy.py @@ -14,11 +14,11 @@ class MVStrategy(ABC): return df @staticmethod - def list_available(series: Series) -> list['MVStrategy']: + def list_available(df: DataFrame, series: Series) -> list['MVStrategy']: """Get all the strategies that can be used.""" choices = [DropStrategy(), ModeStrategy()] if is_numeric_dtype(series): - choices.extend((MeanStrategy(), MedianStrategy())) + choices.extend((MeanStrategy(), MedianStrategy(), LinearRegressionStrategy())) return choices @@ -68,3 +68,12 @@ class ModeStrategy(PositionStrategy): def __str__(self) -> str: return "Use mode" + + +class LinearRegressionStrategy(MVStrategy): + def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: + series.interpolate(inplace=True) + return df + + def __str__(self) -> str: + return "Use linear regression" diff --git a/frontend/pages/normalization.py b/frontend/pages/normalization.py index 4f20c7a..7dd5b84 100644 --- a/frontend/pages/normalization.py +++ b/frontend/pages/normalization.py @@ -7,7 +7,7 @@ if "data" in st.session_state: for column, series in data.items(): missing_count = series.isna().sum() - choices = MVStrategy.list_available(series) + choices = MVStrategy.list_available(data, series) option = st.selectbox( f"Missing values of {column} ({missing_count})", choices, -- 2.43.0 From 197939555c6ac4403ddb30c4389ab117219c47a2 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 19 Jun 2024 08:45:34 +0200 Subject: [PATCH 09/38] debut dbscan --- frontend/pages/clustering:_dbscan.py | 18 ++++++++++++++++++ .../{clustering.py => clustering:_kmeans.py} | 7 ++++--- 2 files changed, 22 insertions(+), 3 deletions(-) create 
mode 100644 frontend/pages/clustering:_dbscan.py rename frontend/pages/{clustering.py => clustering:_kmeans.py} (80%) diff --git a/frontend/pages/clustering:_dbscan.py b/frontend/pages/clustering:_dbscan.py new file mode 100644 index 0000000..02fde08 --- /dev/null +++ b/frontend/pages/clustering:_dbscan.py @@ -0,0 +1,18 @@ +import streamlit as st +import matplotlib.pyplot as plt +from sklearn.cluster import DBSCAN +import numpy as np + +st.header("Clustering: dbscan") + + +if "data" in st.session_state: + data = st.session_state.data + + with st.form("my_form"): + data_name = st.multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=2) + st.form_submit_button('launch') + + +else: + st.error("file not loaded") \ No newline at end of file diff --git a/frontend/pages/clustering.py b/frontend/pages/clustering:_kmeans.py similarity index 80% rename from frontend/pages/clustering.py rename to frontend/pages/clustering:_kmeans.py index 97698ec..ce34e66 100644 --- a/frontend/pages/clustering.py +++ b/frontend/pages/clustering:_kmeans.py @@ -2,7 +2,7 @@ import streamlit as st from sklearn.cluster import KMeans import matplotlib.pyplot as plt -st.header("Clustering") +st.header("Clustering: kmeans") if "data" in st.session_state: @@ -27,8 +27,9 @@ if "data" in st.session_state: y_kmeans = kmeans.fit_predict(x) fig, ax = plt.subplots(figsize=(12,8)) - plt.scatter(x[:, 0], x[:, 1], s=100, c=kmeans.labels_, cmap='Set1') - plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=400, marker='*', color='k') + plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap='viridis') + centers = kmeans.cluster_centers_ + plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, marker='X') st.pyplot(fig) else: -- 2.43.0 From a325603fd96fa3eeebde3f1d9b661019a1cfff82 Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Wed, 19 Jun 2024 09:04:39 +0200 Subject: [PATCH 10/38] Add scaling strategies --- frontend/{mvstrategy.py => normstrategy.py} | 65 ++++++++++++++++++++- frontend/pages/normalization.py | 13 ++++- 2 files changed, 73 insertions(+), 5 deletions(-) rename frontend/{mvstrategy.py => normstrategy.py} (54%) diff --git a/frontend/mvstrategy.py b/frontend/normstrategy.py similarity index 54% rename from frontend/mvstrategy.py rename to frontend/normstrategy.py index fb7cc6c..2896c49 100644 --- a/frontend/mvstrategy.py +++ b/frontend/normstrategy.py @@ -3,16 +3,20 @@ from pandas import DataFrame, Series from pandas.api.types import is_numeric_dtype from typing import Any, Union -class MVStrategy(ABC): - """A way to handle missing values in a dataframe.""" +class DataFrameFunction(ABC): + """A command that may be applied in-place to a dataframe.""" @abstractmethod def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: - """Apply the current strategy to the given series. + """Apply the current function to the given dataframe, in-place. 
The series is described by its label and dataframe.""" return df + +class MVStrategy(DataFrameFunction): + """A way to handle missing values in a dataframe.""" + @staticmethod def list_available(df: DataFrame, series: Series) -> list['MVStrategy']: """Get all the strategies that can be used.""" @@ -22,6 +26,20 @@ class MVStrategy(ABC): return choices +class ScalingStrategy(DataFrameFunction): + """A way to handle missing values in a dataframe.""" + + @staticmethod + def list_available(df: DataFrame, series: Series) -> list['MVStrategy']: + """Get all the strategies that can be used.""" + choices = [KeepStrategy()] + if is_numeric_dtype(series): + choices.extend((MinMaxStrategy(), ZScoreStrategy())) + if series.sum() != 0: + choices.append(UnitLengthStrategy()) + return choices + + class DropStrategy(MVStrategy): #@typing.override def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: @@ -77,3 +95,44 @@ class LinearRegressionStrategy(MVStrategy): def __str__(self) -> str: return "Use linear regression" + + +class KeepStrategy(ScalingStrategy): + #@typing.override + def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: + return df + + def __str__(self) -> str: + return "No-op" + + +class MinMaxStrategy(ScalingStrategy): + #@typing.override + def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: + minimum = series.min() + maximum = series.max() + df[label] = (series - minimum) / (maximum - minimum) + return df + + def __str__(self) -> str: + return "Min-max" + + +class ZScoreStrategy(ScalingStrategy): + #@typing.override + def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: + df[label] = (series - series.mean()) / series.std() + return df + + def __str__(self) -> str: + return "Z-Score" + + +class UnitLengthStrategy(ScalingStrategy): + #@typing.override + def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: + df[label] = series / series.sum() + return df + + def __str__(self) -> str: + return "Unit length" diff --git a/frontend/pages/normalization.py b/frontend/pages/normalization.py index 7dd5b84..ca40f91 100644 --- a/frontend/pages/normalization.py +++ b/frontend/pages/normalization.py @@ -1,14 +1,15 @@ import streamlit as st -from mvstrategy import MVStrategy +from normstrategy import MVStrategy, ScalingStrategy if "data" in st.session_state: data = st.session_state.data st.session_state.data = data.copy() for column, series in data.items(): + col1, col2 = st.columns(2) missing_count = series.isna().sum() choices = MVStrategy.list_available(data, series) - option = st.selectbox( + option = col1.selectbox( f"Missing values of {column} ({missing_count})", choices, index=1, @@ -17,6 +18,14 @@ if "data" in st.session_state: # Always re-get the series to avoid reusing an invalidated series pointer data = option.apply(data, column, data[column]) + choices = ScalingStrategy.list_available(data, series) + option = col2.selectbox( + "Scaling", + choices, + key=f"scaling-{column}", + ) + data = option.apply(data, column, data[column]) + st.write(data) st.session_state.working_data = data else: -- 2.43.0 From 9fc6d7d2d160a262938066d5d9a2c1a54291a998 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 19 Jun 2024 09:16:10 +0200 Subject: [PATCH 11/38] add dbscan --- frontend/pages/clustering:_dbscan.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/frontend/pages/clustering:_dbscan.py b/frontend/pages/clustering:_dbscan.py index 02fde08..2cb4920 100644 --- 
a/frontend/pages/clustering:_dbscan.py +++ b/frontend/pages/clustering:_dbscan.py @@ -11,8 +11,20 @@ if "data" in st.session_state: with st.form("my_form"): data_name = st.multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=2) + eps = st.slider("eps", min_value=0.0, max_value=1.0,value=0.5,step=0.01) + min_samples = st.number_input("min_samples",step=1,min_value=1,value=5) st.form_submit_button('launch') + if len(data_name) == 2: + x = data[data_name].to_numpy() + + dbscan = DBSCAN(eps=eps, min_samples=min_samples) + y_dbscan = dbscan.fit_predict(x) + + + fig, ax = plt.subplots(figsize=(12,8)) + plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap='viridis') + st.pyplot(fig) else: st.error("file not loaded") \ No newline at end of file -- 2.43.0 From 72dcc8ff1cda806bca9416204d51226ba6d3f18a Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 19 Jun 2024 09:17:12 +0200 Subject: [PATCH 12/38] add dbscan --- frontend/pages/clustering:_dbscan.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/frontend/pages/clustering:_dbscan.py b/frontend/pages/clustering:_dbscan.py index 2cb4920..6a8ca22 100644 --- a/frontend/pages/clustering:_dbscan.py +++ b/frontend/pages/clustering:_dbscan.py @@ -10,20 +10,20 @@ if "data" in st.session_state: data = st.session_state.data with st.form("my_form"): - data_name = st.multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=2) - eps = st.slider("eps", min_value=0.0, max_value=1.0,value=0.5,step=0.01) - min_samples = st.number_input("min_samples",step=1,min_value=1,value=5) - st.form_submit_button('launch') + data_name = st.multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=2) + eps = st.slider("eps", min_value=0.0, max_value=1.0, value=0.5, step=0.01) + min_samples = st.number_input("min_samples", step=1, min_value=1, value=5) + st.form_submit_button("launch") if len(data_name) == 2: x = data[data_name].to_numpy() - + dbscan = DBSCAN(eps=eps, min_samples=min_samples) y_dbscan = dbscan.fit_predict(x) fig, ax = plt.subplots(figsize=(12,8)) - plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap='viridis') + plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis") st.pyplot(fig) else: -- 2.43.0 From d4e33e7367bef7a1ce47f119abdf9833cd3bc9b6 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 19 Jun 2024 09:20:59 +0200 Subject: [PATCH 13/38] dbscan --- frontend/pages/clustering:_kmeans.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/frontend/pages/clustering:_kmeans.py b/frontend/pages/clustering:_kmeans.py index ce34e66..824e173 100644 --- a/frontend/pages/clustering:_kmeans.py +++ b/frontend/pages/clustering:_kmeans.py @@ -18,18 +18,18 @@ if "data" in st.session_state: max_iter = row1[0].number_input("max_iter",step=1,min_value=1) - st.form_submit_button('launch') + st.form_submit_button("launch") if len(data_name) == 2: x = data[data_name].to_numpy() - kmeans = KMeans(n_clusters=n_clusters, init='random', n_init=n_init, max_iter=max_iter, random_state=111) + kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111) y_kmeans = kmeans.fit_predict(x) fig, ax = plt.subplots(figsize=(12,8)) - plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap='viridis') + plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis") centers = kmeans.cluster_centers_ - plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, marker='X') + plt.scatter(centers[:, 
0], centers[:, 1], c="black", s=200, marker="X") st.pyplot(fig) else: -- 2.43.0 From 64cf65a4170b8e1cc2e37d9e830ad1bf97090e68 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 19 Jun 2024 09:28:25 +0200 Subject: [PATCH 14/38] max nb cluster to nb line --- frontend/pages/clustering:_kmeans.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/pages/clustering:_kmeans.py b/frontend/pages/clustering:_kmeans.py index 824e173..7be976b 100644 --- a/frontend/pages/clustering:_kmeans.py +++ b/frontend/pages/clustering:_kmeans.py @@ -10,7 +10,7 @@ if "data" in st.session_state: with st.form("my_form"): row1 = st.columns([1,1,1]) - n_clusters = row1[0].selectbox("Number of clusters", range(1, 10)) + n_clusters = row1[0].selectbox("Number of clusters", range(1, data.shape[0])) data_name = row1[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=2) n_init = row1[2].number_input("n_init",step=1,min_value=1) -- 2.43.0 From 34f70b4d792d9bb36f653b9e837f209b98ca1965 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 19 Jun 2024 09:34:52 +0200 Subject: [PATCH 15/38] delete np --- frontend/pages/clustering:_dbscan.py | 1 - frontend/pages/clustering:_kmeans.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/frontend/pages/clustering:_dbscan.py b/frontend/pages/clustering:_dbscan.py index 6a8ca22..da51aa9 100644 --- a/frontend/pages/clustering:_dbscan.py +++ b/frontend/pages/clustering:_dbscan.py @@ -1,7 +1,6 @@ import streamlit as st import matplotlib.pyplot as plt from sklearn.cluster import DBSCAN -import numpy as np st.header("Clustering: dbscan") diff --git a/frontend/pages/clustering:_kmeans.py b/frontend/pages/clustering:_kmeans.py index 7be976b..69d9920 100644 --- a/frontend/pages/clustering:_kmeans.py +++ b/frontend/pages/clustering:_kmeans.py @@ -10,7 +10,7 @@ if "data" in st.session_state: with st.form("my_form"): row1 = st.columns([1,1,1]) - n_clusters = row1[0].selectbox("Number of clusters", range(1, data.shape[0])) + n_clusters = row1[0].selectbox("Number of clusters", range(1,data.shape[0])) data_name = row1[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=2) n_init = row1[2].number_input("n_init",step=1,min_value=1) -- 2.43.0 From 6dcca29cbd36f6aa1d51cf2ef4539d6577046384 Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Wed, 19 Jun 2024 09:49:16 +0200 Subject: [PATCH 16/38] Rename to original_data --- frontend/exploration.py | 2 +- frontend/pages/normalization.py | 6 +++--- frontend/pages/visualization.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/frontend/exploration.py b/frontend/exploration.py index 43d8c72..4cac622 100644 --- a/frontend/exploration.py +++ b/frontend/exploration.py @@ -13,7 +13,7 @@ uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"]) if uploaded_file is not None: st.session_state.data = pd.read_csv(uploaded_file) - st.session_state.working_data = st.session_state.data + st.session_state.original_data = st.session_state.data st.success("File loaded successfully!") diff --git a/frontend/pages/normalization.py b/frontend/pages/normalization.py index ca40f91..3500988 100644 --- a/frontend/pages/normalization.py +++ b/frontend/pages/normalization.py @@ -2,8 +2,8 @@ import streamlit as st from normstrategy import MVStrategy, ScalingStrategy if "data" in st.session_state: - data = st.session_state.data - st.session_state.data = data.copy() + data = st.session_state.original_data + 
st.session_state.original_data = data.copy() for column, series in data.items(): col1, col2 = st.columns(2) @@ -27,6 +27,6 @@ if "data" in st.session_state: data = option.apply(data, column, data[column]) st.write(data) - st.session_state.working_data = data + st.session_state.data = data else: st.error("file not loaded") diff --git a/frontend/pages/visualization.py b/frontend/pages/visualization.py index 6ca8270..057b0c9 100644 --- a/frontend/pages/visualization.py +++ b/frontend/pages/visualization.py @@ -5,8 +5,8 @@ import seaborn as sns st.header("Data Visualization") -if "working_data" in st.session_state: - data = st.session_state.working_data +if "data" in st.session_state: + data = st.session_state.data st.subheader("Histogram") column_to_plot = st.selectbox("Select Column for Histogram", data.columns) -- 2.43.0 From 52cb1407463d195cef2550448532e877b28191df Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 19 Jun 2024 10:03:48 +0200 Subject: [PATCH 17/38] add 3d to kmeans --- frontend/pages/clustering:_kmeans.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/frontend/pages/clustering:_kmeans.py b/frontend/pages/clustering:_kmeans.py index 69d9920..c61bf40 100644 --- a/frontend/pages/clustering:_kmeans.py +++ b/frontend/pages/clustering:_kmeans.py @@ -11,7 +11,7 @@ if "data" in st.session_state: with st.form("my_form"): row1 = st.columns([1,1,1]) n_clusters = row1[0].selectbox("Number of clusters", range(1,data.shape[0])) - data_name = row1[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=2) + data_name = row1[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=3) n_init = row1[2].number_input("n_init",step=1,min_value=1) row2 = st.columns([1,1]) @@ -20,16 +20,24 @@ if "data" in st.session_state: st.form_submit_button("launch") - if len(data_name) == 2: + if len(data_name) >= 2 and len(data_name) <=3: x = data[data_name].to_numpy() kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111) y_kmeans = kmeans.fit_predict(x) - fig, ax = plt.subplots(figsize=(12,8)) - plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis") - centers = kmeans.cluster_centers_ - plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X") + fig = plt.figure() + if len(data_name) == 2: + ax = fig.add_subplot(projection='rectilinear') + plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis") + centers = kmeans.cluster_centers_ + plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X") + else: + ax = fig.add_subplot(projection='3d') + + ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_kmeans, s=50, cmap="viridis") + centers = kmeans.cluster_centers_ + ax.scatter(centers[:, 0], centers[:, 1],centers[:, 2], c="black", s=200, marker="X") st.pyplot(fig) else: -- 2.43.0 From e48c3bfa507041aa7026fa4020702186394d99c6 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Fri, 21 Jun 2024 13:38:27 +0200 Subject: [PATCH 18/38] add 3d plot to bdscan --- frontend/pages/clustering:_dbscan.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/frontend/pages/clustering:_dbscan.py b/frontend/pages/clustering:_dbscan.py index da51aa9..d06b10a 100644 --- a/frontend/pages/clustering:_dbscan.py +++ b/frontend/pages/clustering:_dbscan.py @@ -9,21 +9,27 @@ if "data" in st.session_state: data = st.session_state.data with st.form("my_form"): - data_name = st.multiselect("Data Name", 
data.select_dtypes(include="number").columns, max_selections=2) + data_name = st.multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3) eps = st.slider("eps", min_value=0.0, max_value=1.0, value=0.5, step=0.01) min_samples = st.number_input("min_samples", step=1, min_value=1, value=5) st.form_submit_button("launch") - if len(data_name) == 2: + if len(data_name) >= 2 and len(data_name) <=3: x = data[data_name].to_numpy() dbscan = DBSCAN(eps=eps, min_samples=min_samples) y_dbscan = dbscan.fit_predict(x) - - fig, ax = plt.subplots(figsize=(12,8)) - plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis") + fig = plt.figure() + if len(data_name) == 2: + ax = fig.add_subplot(projection='rectilinear') + plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis") + else: + ax = fig.add_subplot(projection='3d') + ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_dbscan, s=50, cmap="viridis") st.pyplot(fig) + + else: st.error("file not loaded") \ No newline at end of file -- 2.43.0 From 972fde561f3070b5135a71880c720cf3789c80f5 Mon Sep 17 00:00:00 2001 From: Bastien OLLIER Date: Fri, 21 Jun 2024 14:41:28 +0200 Subject: [PATCH 19/38] =?UTF-8?q?Mise=20=C3=A0=20jour=20de=20'frontend/pag?= =?UTF-8?q?es/clustering=5Fdbscan.py'?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frontend/pages/{clustering:_dbscan.py => clustering_dbscan.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename frontend/pages/{clustering:_dbscan.py => clustering_dbscan.py} (100%) diff --git a/frontend/pages/clustering:_dbscan.py b/frontend/pages/clustering_dbscan.py similarity index 100% rename from frontend/pages/clustering:_dbscan.py rename to frontend/pages/clustering_dbscan.py -- 2.43.0 From e5f05a2c8af54383c6498a1b124cf00075af09db Mon Sep 17 00:00:00 2001 From: Bastien OLLIER Date: Fri, 21 Jun 2024 14:41:44 +0200 Subject: [PATCH 20/38] =?UTF-8?q?Mise=20=C3=A0=20jour=20de=20'frontend/pag?= =?UTF-8?q?es/clustering=5Fkmeans.py'?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frontend/pages/{clustering:_kmeans.py => clustering_kmeans.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename frontend/pages/{clustering:_kmeans.py => clustering_kmeans.py} (100%) diff --git a/frontend/pages/clustering:_kmeans.py b/frontend/pages/clustering_kmeans.py similarity index 100% rename from frontend/pages/clustering:_kmeans.py rename to frontend/pages/clustering_kmeans.py -- 2.43.0 From 70641ebca48a0a4c681b4288380ba62104140f79 Mon Sep 17 00:00:00 2001 From: "hugo.pradier2" Date: Wed, 19 Jun 2024 08:18:14 +0200 Subject: [PATCH 21/38] debut prediction --- .gitignore | 1 + frontend/pages/prediction.py | 56 ++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 frontend/pages/prediction.py diff --git a/.gitignore b/.gitignore index bee8a64..9f7550b 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ __pycache__ +.venv diff --git a/frontend/pages/prediction.py b/frontend/pages/prediction.py new file mode 100644 index 0000000..25c074e --- /dev/null +++ b/frontend/pages/prediction.py @@ -0,0 +1,56 @@ +import streamlit as st +from sklearn.ensemble import RandomForestClassifier +from sklearn.ensemble import RandomForestRegressor +from sklearn.linear_model import LogisticRegression +from sklearn.linear_model import LinearRegression +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score +from sklearn import datasets +from 
sklearn.impute import SimpleImputer # Add this line +import pandas as pd +import numpy as np +st.header("Prediction") + +if "data" in st.session_state: + data = st.session_state.data + + with st.form("my_form"): + header = st.columns([2,1,2]) + header[0].subheader("Model") + header[1].subheader("Data Name") + + row1 = st.columns([2,1,2]) + model = row1[0].selectbox("", ["Random Forest Classifier", "Random Forest Regressor", "Logistic Regression", "Linear Regression"]) + data_name = row1[1].selectbox("", data.columns) + + st.form_submit_button('launch') + + if model == "Random Forest Classifier": + model = RandomForestClassifier() + elif model == "Random Forest Regressor": + model = RandomForestRegressor() + elif model == "Logistic Regression": + model = LogisticRegression() + elif model == "Linear Regression": + model = LinearRegression() + + x = data.drop(data_name, axis=1) + y = data[data_name] + + # Convert categorical data to numerical values + x = pd.get_dummies(x) + + # Handle missing values + imputer = SimpleImputer() + x = imputer.fit_transform(x) + x = pd.get_dummies(x) + + x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42) + model.fit(x_train, y_train) + y_pred = model.predict(x_test) + if model == "Random Forest Classifier": + st.write("Accuracy: ", accuracy_score(y_test, y_pred)) + elif model == "Random Forest Regressor" or model == "Logistic Regression" or model == "Linear Regression": + st.write("Mean Squared Error: ", np.mean((y_pred - y_test) ** 2)) +else: + st.error("file not loaded") -- 2.43.0 From a914c3f8f9620addac469f35620ed4e33d863225 Mon Sep 17 00:00:00 2001 From: "hugo.pradier2" Date: Wed, 19 Jun 2024 09:50:28 +0200 Subject: [PATCH 22/38] prediction de regression terminee --- frontend/pages/prediction.py | 56 --------------------- frontend/pages/prediction_classification.py | 41 +++++++++++++++ frontend/pages/prediction_regression.py | 28 +++++++++++ 3 files changed, 69 insertions(+), 56 deletions(-) delete mode 100644 frontend/pages/prediction.py create mode 100644 frontend/pages/prediction_classification.py create mode 100644 frontend/pages/prediction_regression.py diff --git a/frontend/pages/prediction.py b/frontend/pages/prediction.py deleted file mode 100644 index 25c074e..0000000 --- a/frontend/pages/prediction.py +++ /dev/null @@ -1,56 +0,0 @@ -import streamlit as st -from sklearn.ensemble import RandomForestClassifier -from sklearn.ensemble import RandomForestRegressor -from sklearn.linear_model import LogisticRegression -from sklearn.linear_model import LinearRegression -from sklearn.model_selection import train_test_split -from sklearn.metrics import accuracy_score -from sklearn import datasets -from sklearn.impute import SimpleImputer # Add this line -import pandas as pd -import numpy as np -st.header("Prediction") - -if "data" in st.session_state: - data = st.session_state.data - - with st.form("my_form"): - header = st.columns([2,1,2]) - header[0].subheader("Model") - header[1].subheader("Data Name") - - row1 = st.columns([2,1,2]) - model = row1[0].selectbox("", ["Random Forest Classifier", "Random Forest Regressor", "Logistic Regression", "Linear Regression"]) - data_name = row1[1].selectbox("", data.columns) - - st.form_submit_button('launch') - - if model == "Random Forest Classifier": - model = RandomForestClassifier() - elif model == "Random Forest Regressor": - model = RandomForestRegressor() - elif model == "Logistic Regression": - model = LogisticRegression() - elif model == "Linear Regression": - model = 
LinearRegression() - - x = data.drop(data_name, axis=1) - y = data[data_name] - - # Convert categorical data to numerical values - x = pd.get_dummies(x) - - # Handle missing values - imputer = SimpleImputer() - x = imputer.fit_transform(x) - x = pd.get_dummies(x) - - x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42) - model.fit(x_train, y_train) - y_pred = model.predict(x_test) - if model == "Random Forest Classifier": - st.write("Accuracy: ", accuracy_score(y_test, y_pred)) - elif model == "Random Forest Regressor" or model == "Logistic Regression" or model == "Linear Regression": - st.write("Mean Squared Error: ", np.mean((y_pred - y_test) ** 2)) -else: - st.error("file not loaded") diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py new file mode 100644 index 0000000..8d368d4 --- /dev/null +++ b/frontend/pages/prediction_classification.py @@ -0,0 +1,41 @@ +import streamlit as st +from sklearn.ensemble import RandomForestClassifier +from sklearn.preprocessing import LabelEncoder +import pandas as pd + +st.header("Prediction: Classification") + +if "data" in st.session_state: + data = st.session_state.data + + with st.form("classification_form"): + st.subheader("Random Forest Parameters") + + data_name = st.multiselect("Features", data.select_dtypes(include="object").columns, help="Sélectionnez les caractéristiques pour l'entraînement.") + target_name = st.selectbox("Target", data.columns, help="Sélectionnez la variable cible pour l'entraînement.") + + n_estimators = st.number_input("Number of estimators", step=1, min_value=1, value=100, help="Nombre d'arbres dans la forêt.") + max_depth = st.number_input("Max depth", step=1, min_value=1, value=10, help="Profondeur maximale des arbres.") + + submit_button = st.form_submit_button('Train and Predict') + + if submit_button and data_name and target_name: + le = LabelEncoder() + X = data[data_name].apply(le.fit_transform) + y = le.fit_transform(data[target_name]) + + model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, random_state=111) + model.fit(X, y) + + st.subheader("Enter values for prediction") + + pred_values = [st.selectbox(f"Value for {feature}", options=data[feature].unique(), key=f"value_{feature}") for feature in data_name] + pred_values_encoded = [le.transform([val])[0] for val in pred_values] + + prediction = model.predict([pred_values_encoded]) + + prediction_decoded = le.inverse_transform(prediction) + + st.write("Prediction:", prediction_decoded[0]) +else: + st.error("File not loaded") diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py new file mode 100644 index 0000000..c6a6a38 --- /dev/null +++ b/frontend/pages/prediction_regression.py @@ -0,0 +1,28 @@ +import streamlit as st +from sklearn.linear_model import LinearRegression + +st.header("Prediction: Regression") + +if "data" in st.session_state: + data = st.session_state.data + + with st.form("regression_form"): + st.subheader("Linear Regression Parameters") + data_name = st.multiselect("Features", data.select_dtypes(include="number").columns) + target_name = st.selectbox("Target", data.select_dtypes(include="number").columns) + st.form_submit_button('Train and Predict') + + if data_name and target_name: + X = data[data_name] + y = data[target_name] + + model = LinearRegression() + model.fit(X, y) + + st.subheader("Enter values for prediction") + pred_values = [st.number_input(f"Value for {feature}", value=0.0) 
for feature in data_name] + prediction = model.predict([pred_values]) + + st.write("Prediction:", prediction[0]) +else: + st.error("File not loaded") -- 2.43.0 From 2d1c867bed07d10adda8fd5cc00aad0db39c87c2 Mon Sep 17 00:00:00 2001 From: "hugo.pradier2" Date: Fri, 21 Jun 2024 14:15:23 +0200 Subject: [PATCH 23/38] ajout prediction classification --- frontend/pages/prediction_classification.py | 72 ++++++++++++++------- 1 file changed, 47 insertions(+), 25 deletions(-) diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py index 8d368d4..dc70175 100644 --- a/frontend/pages/prediction_classification.py +++ b/frontend/pages/prediction_classification.py @@ -1,7 +1,8 @@ import streamlit as st -from sklearn.ensemble import RandomForestClassifier +from sklearn.linear_model import LogisticRegression +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder -import pandas as pd st.header("Prediction: Classification") @@ -9,33 +10,54 @@ if "data" in st.session_state: data = st.session_state.data with st.form("classification_form"): - st.subheader("Random Forest Parameters") - - data_name = st.multiselect("Features", data.select_dtypes(include="object").columns, help="Sélectionnez les caractéristiques pour l'entraînement.") - target_name = st.selectbox("Target", data.columns, help="Sélectionnez la variable cible pour l'entraînement.") - - n_estimators = st.number_input("Number of estimators", step=1, min_value=1, value=100, help="Nombre d'arbres dans la forêt.") - max_depth = st.number_input("Max depth", step=1, min_value=1, value=10, help="Profondeur maximale des arbres.") - - submit_button = st.form_submit_button('Train and Predict') + st.subheader("Classification Parameters") + data_name = st.multiselect("Features", data.columns) + target_name = st.selectbox("Target", data.columns) + test_size = st.slider("Test Size", min_value=0.1, max_value=0.5, value=0.2, step=0.1) + st.form_submit_button('Train and Predict') - if submit_button and data_name and target_name: - le = LabelEncoder() - X = data[data_name].apply(le.fit_transform) - y = le.fit_transform(data[target_name]) + if data_name and target_name: + X = data[data_name] + y = data[target_name] - model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, random_state=111) - model.fit(X, y) + label_encoders = {} + for column in X.select_dtypes(include=['object']).columns: + le = LabelEncoder() + X[column] = le.fit_transform(X[column]) + label_encoders[column] = le + + if y.dtype == 'object': + le = LabelEncoder() + y = le.fit_transform(y) + label_encoders[target_name] = le + + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42) + + model = LogisticRegression() + model.fit(X_train, y_train) + y_pred = model.predict(X_test) + accuracy = accuracy_score(y_test, y_pred) + + st.subheader("Model Accuracy") + st.write(f"Accuracy on test data: {accuracy:.2f}") st.subheader("Enter values for prediction") + pred_values = [] + for feature in data_name: + if feature in label_encoders: + values = list(label_encoders[feature].classes_) + value = st.selectbox(f"Value for {feature}", values) + value_encoded = label_encoders[feature].transform([value])[0] + pred_values.append(value_encoded) + else: + value = st.number_input(f"Value for {feature}", value=0.0) + pred_values.append(value) + + prediction = model.predict([pred_values]) - pred_values = [st.selectbox(f"Value for 
{feature}", options=data[feature].unique(), key=f"value_{feature}") for feature in data_name] - pred_values_encoded = [le.transform([val])[0] for val in pred_values] - - prediction = model.predict([pred_values_encoded]) - - prediction_decoded = le.inverse_transform(prediction) - - st.write("Prediction:", prediction_decoded[0]) + if target_name in label_encoders: + prediction = label_encoders[target_name].inverse_transform(prediction) + + st.write("Prediction:", prediction[0]) else: st.error("File not loaded") -- 2.43.0 From 089cc660426b17c04e561f333f07934244b71a7a Mon Sep 17 00:00:00 2001 From: "hugo.pradier2" Date: Fri, 21 Jun 2024 14:51:06 +0200 Subject: [PATCH 24/38] correctifs --- frontend/pages/prediction_classification.py | 3 ++- frontend/pages/prediction_regression.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py index dc70175..5aaf52f 100644 --- a/frontend/pages/prediction_classification.py +++ b/frontend/pages/prediction_classification.py @@ -3,6 +3,7 @@ from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder +import pandas as pd st.header("Prediction: Classification") @@ -53,7 +54,7 @@ if "data" in st.session_state: value = st.number_input(f"Value for {feature}", value=0.0) pred_values.append(value) - prediction = model.predict([pred_values]) + prediction = model.predict(pd.DataFrame([pred_values], columns=data_name)) if target_name in label_encoders: prediction = label_encoders[target_name].inverse_transform(prediction) diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index c6a6a38..377274e 100644 --- a/frontend/pages/prediction_regression.py +++ b/frontend/pages/prediction_regression.py @@ -1,5 +1,6 @@ import streamlit as st from sklearn.linear_model import LinearRegression +import pandas as pd st.header("Prediction: Regression") @@ -21,7 +22,7 @@ if "data" in st.session_state: st.subheader("Enter values for prediction") pred_values = [st.number_input(f"Value for {feature}", value=0.0) for feature in data_name] - prediction = model.predict([pred_values]) + prediction = model.predict(pd.DataFrame([pred_values], columns=data_name)) st.write("Prediction:", prediction[0]) else: -- 2.43.0 From cd0c85ea4426e262064e26bb32bab274c5719d1a Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Fri, 21 Jun 2024 15:45:33 +0200 Subject: [PATCH 25/38] Support kNN as an imputation method --- frontend/normstrategy.py | 39 ++++++++++++++++++++++++++++++++- frontend/pages/normalization.py | 8 +++++-- 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/frontend/normstrategy.py b/frontend/normstrategy.py index 2896c49..af4dde3 100644 --- a/frontend/normstrategy.py +++ b/frontend/normstrategy.py @@ -1,6 +1,7 @@ from abc import ABC, abstractmethod from pandas import DataFrame, Series from pandas.api.types import is_numeric_dtype +from sklearn.neighbors import KNeighborsClassifier from typing import Any, Union class DataFrameFunction(ABC): @@ -18,11 +19,14 @@ class MVStrategy(DataFrameFunction): """A way to handle missing values in a dataframe.""" @staticmethod - def list_available(df: DataFrame, series: Series) -> list['MVStrategy']: + def list_available(df: DataFrame, label: str, series: Series) -> list['MVStrategy']: """Get all the strategies that can be used.""" choices = [DropStrategy(), 
ModeStrategy()] if is_numeric_dtype(series): choices.extend((MeanStrategy(), MedianStrategy(), LinearRegressionStrategy())) + other_columns = df.select_dtypes(include="number").drop(label, axis=1).columns.to_list() + if len(other_columns): + choices.append(KNNStrategy(other_columns)) return choices @@ -97,6 +101,39 @@ class LinearRegressionStrategy(MVStrategy): return "Use linear regression" +class KNNStrategy(MVStrategy): + def __init__(self, training_features: list[str]): + self.available_features = training_features + self.training_features = training_features + self.n_neighbors = 3 + + def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: + # Remove any training column that have any missing values + usable_data = df.dropna(subset=self.training_features) + # Select columns to impute from + train_data = usable_data.dropna(subset=label) + # Create train dataframe + x_train = train_data.drop(label, axis=1) + y_train = train_data[label] + + reg = KNeighborsClassifier(self.n_neighbors).fit(x_train, y_train) + + # Create test dataframe + test_data = usable_data[usable_data[label].isnull()] + if test_data.empty: + return df + x_test = test_data.drop(label, axis=1) + predicted = reg.predict(x_test) + + # Fill with predicated values and patch the original data + usable_data[label].fillna(Series(predicted), inplace=True) + df.fillna(usable_data, inplace=True) + return df + + def __str__(self) -> str: + return "kNN" + + class KeepStrategy(ScalingStrategy): #@typing.override def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: diff --git a/frontend/pages/normalization.py b/frontend/pages/normalization.py index 3500988..b543f87 100644 --- a/frontend/pages/normalization.py +++ b/frontend/pages/normalization.py @@ -1,5 +1,5 @@ import streamlit as st -from normstrategy import MVStrategy, ScalingStrategy +from normstrategy import MVStrategy, ScalingStrategy, KNNStrategy if "data" in st.session_state: data = st.session_state.original_data @@ -8,13 +8,17 @@ if "data" in st.session_state: for column, series in data.items(): col1, col2 = st.columns(2) missing_count = series.isna().sum() - choices = MVStrategy.list_available(data, series) + choices = MVStrategy.list_available(data, column, series) option = col1.selectbox( f"Missing values of {column} ({missing_count})", choices, index=1, key=f"mv-{column}", ) + if isinstance(option, KNNStrategy): + print(option.available_features) + option.training_features = st.multiselect("Training columns", option.training_features, default=option.available_features, key=f"cols-{column}") + option.n_neighbors = st.number_input("Number of neighbors", min_value=1, value=option.n_neighbors, key=f"neighbors-{column}") # Always re-get the series to avoid reusing an invalidated series pointer data = option.apply(data, column, data[column]) -- 2.43.0 From d4aeb87f754636a26c2225f41f26fb4f67c79c79 Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Fri, 21 Jun 2024 16:03:54 +0200 Subject: [PATCH 26/38] Limit the number of neighbors based on the dataframe --- frontend/normstrategy.py | 4 ++++ frontend/pages/normalization.py | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/frontend/normstrategy.py b/frontend/normstrategy.py index af4dde3..5a50bab 100644 --- a/frontend/normstrategy.py +++ b/frontend/normstrategy.py @@ -130,6 +130,10 @@ class KNNStrategy(MVStrategy): df.fillna(usable_data, inplace=True) return df + def count_max(self, df: DataFrame, label: str) -> int: + usable_data = df.dropna(subset=self.training_features) + 
return usable_data[label].count() + def __str__(self) -> str: return "kNN" diff --git a/frontend/pages/normalization.py b/frontend/pages/normalization.py index b543f87..34de383 100644 --- a/frontend/pages/normalization.py +++ b/frontend/pages/normalization.py @@ -16,9 +16,8 @@ if "data" in st.session_state: key=f"mv-{column}", ) if isinstance(option, KNNStrategy): - print(option.available_features) option.training_features = st.multiselect("Training columns", option.training_features, default=option.available_features, key=f"cols-{column}") - option.n_neighbors = st.number_input("Number of neighbors", min_value=1, value=option.n_neighbors, key=f"neighbors-{column}") + option.n_neighbors = st.number_input("Number of neighbors", min_value=1, max_value=option.count_max(data, column), value=option.n_neighbors, key=f"neighbors-{column}") # Always re-get the series to avoid reusing an invalidated series pointer data = option.apply(data, column, data[column]) -- 2.43.0 From c87308cc21f36963f8cfff5735e5a25b5c0afd64 Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Fri, 21 Jun 2024 16:46:35 +0200 Subject: [PATCH 27/38] Support multiple column delimiters --- frontend/exploration.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/frontend/exploration.py b/frontend/exploration.py index 4cac622..7c233b4 100644 --- a/frontend/exploration.py +++ b/frontend/exploration.py @@ -1,5 +1,6 @@ import pandas as pd import streamlit as st +import codecs st.set_page_config( page_title="Project Miner", @@ -9,10 +10,13 @@ st.set_page_config( st.title("Home") ### Exploration -uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"]) +uploaded_file = st.file_uploader("Upload your CSV file", type=["csv", "tsv"]) +separator = st.selectbox("Separator", [",", ";", "\\t"]) +separator = codecs.getdecoder("unicode_escape")(separator)[0] +has_header = st.checkbox("Has header", value=True) if uploaded_file is not None: - st.session_state.data = pd.read_csv(uploaded_file) + st.session_state.data = pd.read_csv(uploaded_file, sep=separator, header=0 if has_header else 1) st.session_state.original_data = st.session_state.data st.success("File loaded successfully!") -- 2.43.0 From 9cb0d90eb14f26143da71f2a9cb226cc04e8191f Mon Sep 17 00:00:00 2001 From: Bastien OLLIER Date: Fri, 21 Jun 2024 16:53:00 +0200 Subject: [PATCH 28/38] Add CI/CD (#9) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: clfreville2 Co-authored-by: bastien ollier Reviewed-on: https://codefirst.iut.uca.fr/git/clement.freville2/miner/pulls/9 Reviewed-by: Clément FRÉVILLE Co-authored-by: Bastien OLLIER Co-committed-by: Bastien OLLIER --- .drone.yml | 44 ++++++++++++++++++++++++++++++++++++++++++++ Dockerfile | 9 +++++++++ requirements.txt | 5 +++++ 3 files changed, 58 insertions(+) create mode 100644 .drone.yml create mode 100644 Dockerfile create mode 100644 requirements.txt diff --git a/.drone.yml b/.drone.yml new file mode 100644 index 0000000..4610933 --- /dev/null +++ b/.drone.yml @@ -0,0 +1,44 @@ +kind: pipeline +name: default +type: docker + +trigger: + event: + - push + +steps: + - name: lint + image: python:3.12 + commands: + - pip install --root-user-action=ignore -r requirements.txt + - ruff check . 
+ + - name: docker-image + image: plugins/docker + settings: + dockerfile: Dockerfile + registry: hub.codefirst.iut.uca.fr + repo: hub.codefirst.iut.uca.fr/bastien.ollier/miner + username: + from_secret: REGISTRY_USER + password: + from_secret: REGISTRY_PASSWORD + cache_from: + - hub.codefirst.iut.uca.fr/bastien.ollier/miner:latest + depends_on: [ lint ] + + - name: deploy-miner + image: hub.codefirst.iut.uca.fr/clement.freville2/codefirst-dockerproxy-clientdrone:latest + settings: + image: hub.codefirst.iut.uca.fr/bastien.ollier/miner:latest + container: miner + command: create + overwrite: true + admins: bastienollier,clementfreville2,hugopradier2 + environment: + DRONE_REPO_OWNER: bastien.ollier + depends_on: [ docker-image ] + when: + branch: + - main + - ci/* diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..dd96397 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,9 @@ +FROM python:3.12-slim + +WORKDIR /app + +COPY . . +RUN pip3 install -r requirements.txt + +EXPOSE 80 +ENTRYPOINT ["streamlit", "run", "frontend/exploration.py", "--server.port=80", "--server.address=0.0.0.0", "--server.baseUrlPath=/containers/bastienollier-miner"] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..63b5398 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +matplotlib>=3.5.0 +pandas>=1.5.0 +seaborn>=0.12.0 +streamlit>=1.35.0 +ruff>=0.4.8 -- 2.43.0 From 4d82767c68d3a81402ba97a5e6d86df2fb62a16c Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Fri, 21 Jun 2024 16:59:51 +0200 Subject: [PATCH 29/38] Add SkLearn to requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 63b5398..03bf2ac 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ matplotlib>=3.5.0 pandas>=1.5.0 seaborn>=0.12.0 +scikit-learn>=0.23.0 streamlit>=1.35.0 ruff>=0.4.8 -- 2.43.0 From 9da6e2d594688346d398f9fb8bf2e6ee5684050b Mon Sep 17 00:00:00 2001 From: Bastien OLLIER Date: Tue, 25 Jun 2024 08:37:38 +0200 Subject: [PATCH 30/38] Add cluster stats (#13) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: bastien ollier Reviewed-on: https://codefirst.iut.uca.fr/git/clement.freville2/miner/pulls/13 Reviewed-by: Hugo PRADIER Reviewed-by: Clément FRÉVILLE Co-authored-by: Bastien OLLIER Co-committed-by: Bastien OLLIER --- frontend/clusters.py | 63 +++++++++++++++++++++++++++++ frontend/pages/clustering_dbscan.py | 11 ++--- frontend/pages/clustering_kmeans.py | 14 +++---- 3 files changed, 74 insertions(+), 14 deletions(-) create mode 100644 frontend/clusters.py diff --git a/frontend/clusters.py b/frontend/clusters.py new file mode 100644 index 0000000..ac2af4c --- /dev/null +++ b/frontend/clusters.py @@ -0,0 +1,63 @@ +from sklearn.cluster import DBSCAN, KMeans +import numpy as np + +class DBSCAN_cluster(): + def __init__(self, eps, min_samples,data): + self.eps = eps + self.min_samples = min_samples + self.data = data + self.labels = np.array([]) + + def run(self): + dbscan = DBSCAN(eps=self.eps, min_samples=self.min_samples) + self.labels = dbscan.fit_predict(self.data) + return self.labels + + def get_stats(self): + unique_labels = np.unique(self.labels) + stats = [] + for label in unique_labels: + if label == -1: + continue + cluster_points = self.data[self.labels == label] + num_points = len(cluster_points) + density = num_points / (np.max(cluster_points, axis=0) - np.min(cluster_points, axis=0)).prod() + stats.append({ + "cluster": label, + 
"num_points": num_points, + "density": density + }) + + return stats + + +class KMeans_cluster(): + def __init__(self, n_clusters, n_init, max_iter, data): + self.n_clusters = n_clusters + self.n_init = n_init + self.max_iter = max_iter + self.data = data + self.labels = np.array([]) + self.centers = [] + + def run(self): + kmeans = KMeans(n_clusters=self.n_clusters, init="random", n_init=self.n_init, max_iter=self.max_iter, random_state=111) + self.labels = kmeans.fit_predict(self.data) + self.centers = kmeans.cluster_centers_ + return self.labels + + + def get_stats(self): + unique_labels = np.unique(self.labels) + stats = [] + + for label in unique_labels: + cluster_points = self.data[self.labels == label] + num_points = len(cluster_points) + center = self.centers[label] + stats.append({ + 'cluster': label, + 'num_points': num_points, + 'center': center + }) + return stats diff --git a/frontend/pages/clustering_dbscan.py b/frontend/pages/clustering_dbscan.py index d06b10a..7ca16f6 100644 --- a/frontend/pages/clustering_dbscan.py +++ b/frontend/pages/clustering_dbscan.py @@ -1,10 +1,9 @@ import streamlit as st import matplotlib.pyplot as plt -from sklearn.cluster import DBSCAN +from clusters import DBSCAN_cluster st.header("Clustering: dbscan") - if "data" in st.session_state: data = st.session_state.data @@ -17,8 +16,9 @@ if "data" in st.session_state: if len(data_name) >= 2 and len(data_name) <=3: x = data[data_name].to_numpy() - dbscan = DBSCAN(eps=eps, min_samples=min_samples) - y_dbscan = dbscan.fit_predict(x) + dbscan = DBSCAN_cluster(eps,min_samples,x) + y_dbscan = dbscan.run() + st.table(dbscan.get_stats()) fig = plt.figure() if len(data_name) == 2: @@ -28,8 +28,5 @@ if "data" in st.session_state: ax = fig.add_subplot(projection='3d') ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_dbscan, s=50, cmap="viridis") st.pyplot(fig) - - - else: st.error("file not loaded") \ No newline at end of file diff --git a/frontend/pages/clustering_kmeans.py b/frontend/pages/clustering_kmeans.py index c61bf40..63c7d55 100644 --- a/frontend/pages/clustering_kmeans.py +++ b/frontend/pages/clustering_kmeans.py @@ -1,10 +1,9 @@ import streamlit as st -from sklearn.cluster import KMeans import matplotlib.pyplot as plt +from clusters import KMeans_cluster st.header("Clustering: kmeans") - if "data" in st.session_state: data = st.session_state.data @@ -23,21 +22,22 @@ if "data" in st.session_state: if len(data_name) >= 2 and len(data_name) <=3: x = data[data_name].to_numpy() - kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111) - y_kmeans = kmeans.fit_predict(x) + kmeans = KMeans_cluster(n_clusters, n_init, max_iter, x) + y_kmeans = kmeans.run() + st.table(kmeans.get_stats()) + + centers = kmeans.centers fig = plt.figure() if len(data_name) == 2: ax = fig.add_subplot(projection='rectilinear') plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis") - centers = kmeans.cluster_centers_ plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X") else: ax = fig.add_subplot(projection='3d') ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_kmeans, s=50, cmap="viridis") - centers = kmeans.cluster_centers_ - ax.scatter(centers[:, 0], centers[:, 1],centers[:, 2], c="black", s=200, marker="X") + ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], c="black", s=200, marker="X") st.pyplot(fig) else: -- 2.43.0 From 01168f3588d9449ad92d36333635334642b8cef8 Mon Sep 17 00:00:00 2001 From: bastien Date: Tue, 25 Jun 2024 18:06:30 +0200 Subject: [PATCH 31/38] add 
visualization to prediction regression --- frontend/pages/prediction_regression.py | 31 +++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index 377274e..42acf34 100644 --- a/frontend/pages/prediction_regression.py +++ b/frontend/pages/prediction_regression.py @@ -1,6 +1,8 @@ import streamlit as st from sklearn.linear_model import LinearRegression import pandas as pd +import matplotlib.pyplot as plt +import numpy as np st.header("Prediction: Regression") @@ -25,5 +27,34 @@ if "data" in st.session_state: prediction = model.predict(pd.DataFrame([pred_values], columns=data_name)) st.write("Prediction:", prediction[0]) + + fig = plt.figure() + dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name) + + if len(data_name) == 1: + X = dataframe_sorted[data_name[0]] + y = dataframe_sorted[target_name] + + prediction_array_y = [ + model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i]]], columns=data_name))[0] + for i in range(dataframe_sorted.shape[0]) + ] + + plt.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[target_name], color='b') + plt.scatter(dataframe_sorted[data_name[0]], prediction_array_y, color='r') + else: + ax = fig.add_subplot(111, projection='3d') + + prediction_array_y = [ + model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i], dataframe_sorted[data_name[1]].iloc[i]]], columns=data_name))[0] + for i in range(dataframe_sorted.shape[0]) + ] + + ax.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], dataframe_sorted[target_name], color='b') + ax.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], prediction_array_y, color='r') + + st.pyplot(fig) + + else: st.error("File not loaded") -- 2.43.0 From 405439564147fc6430b9a933e156fe1e480372da Mon Sep 17 00:00:00 2001 From: bastien Date: Tue, 25 Jun 2024 19:54:35 +0200 Subject: [PATCH 32/38] Update prediction plots --- frontend/pages/prediction_classification.py | 23 +++++++++++++++ frontend/pages/prediction_regression.py | 7 +++---- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py index 5aaf52f..20ae5e1 100644 --- a/frontend/pages/prediction_classification.py +++ b/frontend/pages/prediction_classification.py @@ -4,6 +4,8 @@ from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score from sklearn.preprocessing import LabelEncoder import pandas as pd +import matplotlib.pyplot as plt + st.header("Prediction: Classification") @@ -60,5 +62,26 @@ if "data" in st.session_state: prediction = label_encoders[target_name].inverse_transform(prediction) st.write("Prediction:", prediction[0]) + + + + + fig = plt.figure() + dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name) + + X = dataframe_sorted[data_name[0]] + y = dataframe_sorted[target_name] + + prediction_array_y = [ + model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i]]], columns=data_name))[0] + for i in range(dataframe_sorted.shape[0]) + ] + + plt.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[target_name], color='b') + plt.scatter(dataframe_sorted[data_name[0]], prediction_array_y, color='r') + + st.pyplot(fig) + + else: st.error("File not loaded") diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index 42acf34..6d125e0 100644 --- a/frontend/pages/prediction_regression.py +++ 
b/frontend/pages/prediction_regression.py @@ -41,8 +41,8 @@ if "data" in st.session_state: ] plt.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[target_name], color='b') - plt.scatter(dataframe_sorted[data_name[0]], prediction_array_y, color='r') - else: + plt.plot(dataframe_sorted[data_name[0]], prediction_array_y, color='r') + elif len(data_name) == 2: ax = fig.add_subplot(111, projection='3d') prediction_array_y = [ @@ -51,10 +51,9 @@ if "data" in st.session_state: ] ax.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], dataframe_sorted[target_name], color='b') - ax.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], prediction_array_y, color='r') + ax.plot(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], prediction_array_y, color='r') st.pyplot(fig) - else: st.error("File not loaded") -- 2.43.0 From 27e69b2af8b4dfd1adb7c5441f8e0a713d1192d4 Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 26 Jun 2024 10:45:50 +0200 Subject: [PATCH 33/38] add confusion_matrix --- frontend/pages/prediction_classification.py | 26 +++++++++------------ frontend/pages/prediction_regression.py | 2 +- 2 files changed, 12 insertions(+), 16 deletions(-) diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py index 20ae5e1..c11d7ee 100644 --- a/frontend/pages/prediction_classification.py +++ b/frontend/pages/prediction_classification.py @@ -1,11 +1,11 @@ import streamlit as st from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split -from sklearn.metrics import accuracy_score +from sklearn.metrics import accuracy_score,confusion_matrix from sklearn.preprocessing import LabelEncoder import pandas as pd import matplotlib.pyplot as plt - +import seaborn as sns st.header("Prediction: Classification") @@ -63,24 +63,20 @@ if "data" in st.session_state: st.write("Prediction:", prediction[0]) + if len(data_name) == 1: + fig = plt.figure() + y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()] + print([x[0] for x in X.values.tolist()]) + cm = confusion_matrix(y, y_pred) + sns.heatmap(cm, annot=True, fmt="d") - fig = plt.figure() - dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name) + plt.xlabel('Predicted') + plt.ylabel('True') - X = dataframe_sorted[data_name[0]] - y = dataframe_sorted[target_name] + st.pyplot(fig) - prediction_array_y = [ - model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i]]], columns=data_name))[0] - for i in range(dataframe_sorted.shape[0]) - ] - - plt.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[target_name], color='b') - plt.scatter(dataframe_sorted[data_name[0]], prediction_array_y, color='r') - - st.pyplot(fig) else: diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index 6d125e0..e06fa12 100644 --- a/frontend/pages/prediction_regression.py +++ b/frontend/pages/prediction_regression.py @@ -2,7 +2,6 @@ import streamlit as st from sklearn.linear_model import LinearRegression import pandas as pd import matplotlib.pyplot as plt -import numpy as np st.header("Prediction: Regression") @@ -31,6 +30,7 @@ if "data" in st.session_state: fig = plt.figure() dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name) + if len(data_name) == 1: X = dataframe_sorted[data_name[0]] y = dataframe_sorted[target_name] -- 2.43.0 From da1e97f07f3dca68e0762700ba4a1b79f023dad3 Mon Sep 17 00:00:00 
2001 From: bastien ollier Date: Wed, 26 Jun 2024 10:59:25 +0200 Subject: [PATCH 34/38] add r2 score --- frontend/pages/prediction_classification.py | 6 +----- frontend/pages/prediction_regression.py | 5 +++++ 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py index c11d7ee..bb6bb22 100644 --- a/frontend/pages/prediction_classification.py +++ b/frontend/pages/prediction_classification.py @@ -67,7 +67,6 @@ if "data" in st.session_state: fig = plt.figure() y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()] - print([x[0] for x in X.values.tolist()]) cm = confusion_matrix(y, y_pred) sns.heatmap(cm, annot=True, fmt="d") @@ -75,9 +74,6 @@ if "data" in st.session_state: plt.xlabel('Predicted') plt.ylabel('True') - st.pyplot(fig) - - - + st.pyplot(fig, figsize=(1, 1)) else: st.error("File not loaded") diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index e06fa12..35b648d 100644 --- a/frontend/pages/prediction_regression.py +++ b/frontend/pages/prediction_regression.py @@ -1,5 +1,6 @@ import streamlit as st from sklearn.linear_model import LinearRegression +from sklearn.metrics import r2_score import pandas as pd import matplotlib.pyplot as plt @@ -21,6 +22,10 @@ if "data" in st.session_state: model = LinearRegression() model.fit(X, y) + y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()] + r2 = r2_score(y, y_pred) + st.write('R-squared score:', r2) + st.subheader("Enter values for prediction") pred_values = [st.number_input(f"Value for {feature}", value=0.0) for feature in data_name] prediction = model.predict(pd.DataFrame([pred_values], columns=data_name)) -- 2.43.0 From 9bc9e21e45412fca2f2ffce6230cb61573ad928c Mon Sep 17 00:00:00 2001 From: bastien ollier Date: Wed, 26 Jun 2024 11:05:04 +0200 Subject: [PATCH 35/38] add r2 score --- frontend/pages/prediction_regression.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/frontend/pages/prediction_regression.py b/frontend/pages/prediction_regression.py index 35b648d..a290c10 100644 --- a/frontend/pages/prediction_regression.py +++ b/frontend/pages/prediction_regression.py @@ -22,10 +22,6 @@ if "data" in st.session_state: model = LinearRegression() model.fit(X, y) - y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()] - r2 = r2_score(y, y_pred) - st.write('R-squared score:', r2) - st.subheader("Enter values for prediction") pred_values = [st.number_input(f"Value for {feature}", value=0.0) for feature in data_name] prediction = model.predict(pd.DataFrame([pred_values], columns=data_name)) @@ -35,8 +31,11 @@ if "data" in st.session_state: fig = plt.figure() dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name) - if len(data_name) == 1: + y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()] + r2 = r2_score(y, y_pred) + st.write('R-squared score:', r2) + X = dataframe_sorted[data_name[0]] y = dataframe_sorted[target_name] -- 2.43.0 From 01ef19a2f80888d559a8fff910039bc0b057c2bc Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Wed, 26 Jun 2024 12:00:21 +0200 Subject: [PATCH 36/38] Merge files using strategies --- frontend/clusters.py | 78 ++++++++++++++++++----------- frontend/pages/clustering.py | 48 ++++++++++++++++++ 
frontend/pages/clustering_dbscan.py | 32 ------------ frontend/pages/clustering_kmeans.py | 44 ---------------- 4 files changed, 97 insertions(+), 105 deletions(-) create mode 100644 frontend/pages/clustering.py delete mode 100644 frontend/pages/clustering_dbscan.py delete mode 100644 frontend/pages/clustering_kmeans.py diff --git a/frontend/clusters.py b/frontend/clusters.py index ac2af4c..20e1927 100644 --- a/frontend/clusters.py +++ b/frontend/clusters.py @@ -1,25 +1,40 @@ from sklearn.cluster import DBSCAN, KMeans import numpy as np +from dataclasses import dataclass +from abc import ABC, abstractmethod +from typing import Any, Optional -class DBSCAN_cluster(): - def __init__(self, eps, min_samples,data): +@dataclass +class ClusterResult: + labels: np.array + centers: Optional[np.array] + statistics: list[dict[str, Any]] + + +class Cluster(ABC): + @abstractmethod + def run(self, data: np.array) -> ClusterResult: + pass + + +class DBSCANCluster(Cluster): + def __init__(self, eps: float = 0.5, min_samples: int = 5): self.eps = eps self.min_samples = min_samples - self.data = data - self.labels = np.array([]) - def run(self): + #@typing.override + def run(self, data: np.array) -> ClusterResult: dbscan = DBSCAN(eps=self.eps, min_samples=self.min_samples) - self.labels = dbscan.fit_predict(self.data) - return self.labels + labels = dbscan.fit_predict(data) + return ClusterResult(labels, None, self.get_statistics(data, labels)) - def get_stats(self): - unique_labels = np.unique(self.labels) + def get_statistics(self, data: np.array, labels: np.array) -> list[dict[str, Any]]: + unique_labels = np.unique(labels) stats = [] for label in unique_labels: if label == -1: continue - cluster_points = self.data[self.labels == label] + cluster_points = data[labels == label] num_points = len(cluster_points) density = num_points / (np.max(cluster_points, axis=0) - np.min(cluster_points, axis=0)).prod() stats.append({ @@ -27,37 +42,42 @@ class DBSCAN_cluster(): "num_points": num_points, "density": density }) - return stats - -class KMeans_cluster(): - def __init__(self, n_clusters, n_init, max_iter, data): + def __str__(self) -> str: + return "DBScan" + + +class KMeansCluster(Cluster): + def __init__(self, n_clusters: int = 8, n_init: int = 1, max_iter: int = 300): self.n_clusters = n_clusters self.n_init = n_init self.max_iter = max_iter - self.data = data - self.labels = np.array([]) - self.centers = [] - def run(self): + #@typing.override + def run(self, data: np.array) -> ClusterResult: kmeans = KMeans(n_clusters=self.n_clusters, init="random", n_init=self.n_init, max_iter=self.max_iter, random_state=111) - self.labels = kmeans.fit_predict(self.data) - self.centers = kmeans.cluster_centers_ - return self.labels + labels = kmeans.fit_predict(data) + centers = kmeans.cluster_centers_ + return ClusterResult(labels, centers, self.get_statistics(data, labels, centers)) - - def get_stats(self): - unique_labels = np.unique(self.labels) + def get_statistics(self, data: np.array, labels: np.array, centers: np.array) -> list[dict[str, Any]]: + unique_labels = np.unique(labels) stats = [] for label in unique_labels: - cluster_points = self.data[self.labels == label] + cluster_points = data[labels == label] num_points = len(cluster_points) - center = self.centers[label] + center = centers[label] stats.append({ - 'cluster': label, - 'num_points': num_points, - 'center': center + "cluster": label, + "num_points": num_points, + "center": center, }) return stats + + def __str__(self) -> str: + return "KMeans" + + 
+CLUSTERING_STRATEGIES = [DBSCANCluster(), KMeansCluster()] diff --git a/frontend/pages/clustering.py b/frontend/pages/clustering.py new file mode 100644 index 0000000..b3bf971 --- /dev/null +++ b/frontend/pages/clustering.py @@ -0,0 +1,48 @@ +import streamlit as st +import matplotlib.pyplot as plt +from clusters import DBSCANCluster, KMeansCluster, CLUSTERING_STRATEGIES + +st.header("Clustering") + +if "data" in st.session_state: + data = st.session_state.data + + general_row = st.columns([1, 1]) + clustering = general_row[0].selectbox("Clustering method", CLUSTERING_STRATEGIES) + data_name = general_row[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=3) + + with st.form("cluster_form"): + if isinstance(clustering, KMeansCluster): + row1 = st.columns([1, 1, 1]) + clustering.n_clusters = row1[0].number_input("Number of clusters", min_value=1, max_value=data.shape[0], value=clustering.n_clusters) + clustering.n_init = row1[1].number_input("n_init", min_value=1, value=clustering.n_init) + clustering.max_iter = row1[2].number_input("max_iter", min_value=1, value=clustering.max_iter) + elif isinstance(clustering, DBSCANCluster): + clustering.eps = st.slider("eps", min_value=0.0001, max_value=1.0, step=0.1, value=clustering.eps) + clustering.min_samples = st.number_input("min_samples", min_value=1, value=clustering.min_samples) + + st.form_submit_button("Launch") + + if len(data_name) >= 2 and len(data_name) <=3: + x = data[data_name].to_numpy() + + result = clustering.run(x) + + st.table(result.statistics) + + fig = plt.figure() + if len(data_name) == 2: + ax = fig.add_subplot(projection='rectilinear') + plt.scatter(x[:, 0], x[:, 1], c=result.labels, s=50, cmap="viridis") + if result.centers is not None: + plt.scatter(result.centers[:, 0], result.centers[:, 1], c="black", s=200, marker="X") + else: + ax = fig.add_subplot(projection='3d') + + ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=result.labels, s=50, cmap="viridis") + if result.centers is not None: + ax.scatter(result.centers[:, 0], result.centers[:, 1], result.centers[:, 2], c="black", s=200, marker="X") + st.pyplot(fig) + +else: + st.error("file not loaded") diff --git a/frontend/pages/clustering_dbscan.py b/frontend/pages/clustering_dbscan.py deleted file mode 100644 index 7ca16f6..0000000 --- a/frontend/pages/clustering_dbscan.py +++ /dev/null @@ -1,32 +0,0 @@ -import streamlit as st -import matplotlib.pyplot as plt -from clusters import DBSCAN_cluster - -st.header("Clustering: dbscan") - -if "data" in st.session_state: - data = st.session_state.data - - with st.form("my_form"): - data_name = st.multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3) - eps = st.slider("eps", min_value=0.0, max_value=1.0, value=0.5, step=0.01) - min_samples = st.number_input("min_samples", step=1, min_value=1, value=5) - st.form_submit_button("launch") - - if len(data_name) >= 2 and len(data_name) <=3: - x = data[data_name].to_numpy() - - dbscan = DBSCAN_cluster(eps,min_samples,x) - y_dbscan = dbscan.run() - st.table(dbscan.get_stats()) - - fig = plt.figure() - if len(data_name) == 2: - ax = fig.add_subplot(projection='rectilinear') - plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis") - else: - ax = fig.add_subplot(projection='3d') - ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_dbscan, s=50, cmap="viridis") - st.pyplot(fig) -else: - st.error("file not loaded") \ No newline at end of file diff --git a/frontend/pages/clustering_kmeans.py b/frontend/pages/clustering_kmeans.py 
deleted file mode 100644 index 63c7d55..0000000 --- a/frontend/pages/clustering_kmeans.py +++ /dev/null @@ -1,44 +0,0 @@ -import streamlit as st -import matplotlib.pyplot as plt -from clusters import KMeans_cluster - -st.header("Clustering: kmeans") - -if "data" in st.session_state: - data = st.session_state.data - - with st.form("my_form"): - row1 = st.columns([1,1,1]) - n_clusters = row1[0].selectbox("Number of clusters", range(1,data.shape[0])) - data_name = row1[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=3) - n_init = row1[2].number_input("n_init",step=1,min_value=1) - - row2 = st.columns([1,1]) - max_iter = row1[0].number_input("max_iter",step=1,min_value=1) - - - st.form_submit_button("launch") - - if len(data_name) >= 2 and len(data_name) <=3: - x = data[data_name].to_numpy() - - kmeans = KMeans_cluster(n_clusters, n_init, max_iter, x) - y_kmeans = kmeans.run() - - st.table(kmeans.get_stats()) - - centers = kmeans.centers - fig = plt.figure() - if len(data_name) == 2: - ax = fig.add_subplot(projection='rectilinear') - plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis") - plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X") - else: - ax = fig.add_subplot(projection='3d') - - ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_kmeans, s=50, cmap="viridis") - ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], c="black", s=200, marker="X") - st.pyplot(fig) - -else: - st.error("file not loaded") -- 2.43.0 From 7cb0d559699de6c6b4285a9af8ad23a674dbec37 Mon Sep 17 00:00:00 2001 From: clfreville2 Date: Wed, 26 Jun 2024 20:45:55 +0200 Subject: [PATCH 37/38] Allow using PCA to reduce dataset dimensions --- frontend/pages/clustering.py | 52 +++++++++++++++++++++++++++++++----- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/frontend/pages/clustering.py b/frontend/pages/clustering.py index b3bf971..2c2fb8e 100644 --- a/frontend/pages/clustering.py +++ b/frontend/pages/clustering.py @@ -1,15 +1,19 @@ import streamlit as st import matplotlib.pyplot as plt from clusters import DBSCANCluster, KMeansCluster, CLUSTERING_STRATEGIES +from sklearn.decomposition import PCA +from sklearn.metrics import silhouette_score +import numpy as np st.header("Clustering") if "data" in st.session_state: data = st.session_state.data - general_row = st.columns([1, 1]) + general_row = st.columns([1, 1, 1]) clustering = general_row[0].selectbox("Clustering method", CLUSTERING_STRATEGIES) - data_name = general_row[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=3) + data_name = general_row[1].multiselect("Columns", data.select_dtypes(include="number").columns) + n_components = general_row[2].number_input("Reduce dimensions to (PCA)", min_value=1, max_value=3, value=2) with st.form("cluster_form"): if isinstance(clustering, KMeansCluster): @@ -18,20 +22,50 @@ if "data" in st.session_state: clustering.n_init = row1[1].number_input("n_init", min_value=1, value=clustering.n_init) clustering.max_iter = row1[2].number_input("max_iter", min_value=1, value=clustering.max_iter) elif isinstance(clustering, DBSCANCluster): - clustering.eps = st.slider("eps", min_value=0.0001, max_value=1.0, step=0.1, value=clustering.eps) - clustering.min_samples = st.number_input("min_samples", min_value=1, value=clustering.min_samples) + row1 = st.columns([1, 1]) + clustering.eps = row1[0].slider("eps", min_value=0.0001, max_value=1.0, step=0.05, value=clustering.eps) + clustering.min_samples = 
row1[1].number_input("min_samples", min_value=1, value=clustering.min_samples) st.form_submit_button("Launch") - if len(data_name) >= 2 and len(data_name) <=3: + if len(data_name) > 0: x = data[data_name].to_numpy() + n_components = min(n_components, len(data_name)) + if len(data_name) > n_components: + pca = PCA(n_components) + x = pca.fit_transform(x) + if n_components == 2: + (fig, ax) = plt.subplots(figsize=(8, 8)) + for i in range(0, pca.components_.shape[1]): + ax.arrow( + 0, + 0, + pca.components_[0, i], + pca.components_[1, i], + head_width=0.1, + head_length=0.1 + ) + + plt.text( + pca.components_[0, i] + 0.05, + pca.components_[1, i] + 0.05, + data_name[i] + ) + circle = plt.Circle((0, 0), radius=1, edgecolor='b', facecolor='None') + ax.add_patch(circle) + plt.axis("equal") + ax.set_title("PCA result - Correlation circle") + st.pyplot(fig) result = clustering.run(x) - + st.write("## Cluster stats") st.table(result.statistics) + st.write("## Graphical representation") fig = plt.figure() - if len(data_name) == 2: + if n_components == 1: + plt.scatter(x, np.zeros_like(x)) + elif n_components == 2: ax = fig.add_subplot(projection='rectilinear') plt.scatter(x[:, 0], x[:, 1], c=result.labels, s=50, cmap="viridis") if result.centers is not None: @@ -43,6 +77,10 @@ if "data" in st.session_state: if result.centers is not None: ax.scatter(result.centers[:, 0], result.centers[:, 1], result.centers[:, 2], c="black", s=200, marker="X") st.pyplot(fig) + if not (result.labels == 0).all(): + st.write("Silhouette score:", silhouette_score(x, result.labels)) + else: + st.error("Select at least one column") else: st.error("file not loaded") -- 2.43.0 From f464f6166ae0cd045df8b7a064af26fcd5e66f74 Mon Sep 17 00:00:00 2001 From: gorky1234 Date: Wed, 26 Jun 2024 21:31:01 +0200 Subject: [PATCH 38/38] corection bug figsize --- frontend/pages/prediction_classification.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/pages/prediction_classification.py b/frontend/pages/prediction_classification.py index bb6bb22..648db06 100644 --- a/frontend/pages/prediction_classification.py +++ b/frontend/pages/prediction_classification.py @@ -74,6 +74,6 @@ if "data" in st.session_state: plt.xlabel('Predicted') plt.ylabel('True') - st.pyplot(fig, figsize=(1, 1)) + st.pyplot(fig) else: st.error("File not loaded") -- 2.43.0