1 Commit

Author: Bastien OLLIER
SHA1: 7be9d5a6c8
Message: Update 'frontend/pages/clustering_dbscan.py'
Date: 2024-06-21 14:41:05 +02:00
20 changed files with 87 additions and 359 deletions

View File

@@ -1,44 +0,0 @@
kind: pipeline
name: default
type: docker

trigger:
  event:
    - push

steps:
  - name: lint
    image: python:3.12
    commands:
      - pip install --root-user-action=ignore -r requirements.txt
      - ruff check .

  - name: docker-image
    image: plugins/docker
    settings:
      dockerfile: Dockerfile
      registry: hub.codefirst.iut.uca.fr
      repo: hub.codefirst.iut.uca.fr/bastien.ollier/miner
      username:
        from_secret: REGISTRY_USER
      password:
        from_secret: REGISTRY_PASSWORD
      cache_from:
        - hub.codefirst.iut.uca.fr/bastien.ollier/miner:latest
    depends_on: [ lint ]

  - name: deploy-miner
    image: hub.codefirst.iut.uca.fr/clement.freville2/codefirst-dockerproxy-clientdrone:latest
    settings:
      image: hub.codefirst.iut.uca.fr/bastien.ollier/miner:latest
      container: miner
      command: create
      overwrite: true
      admins: bastienollier,clementfreville2,hugopradier2
      environment:
        DRONE_REPO_OWNER: bastien.ollier
    depends_on: [ docker-image ]
    when:
      branch:
        - main
        - ci/*

.gitignore
View File

@@ -1,2 +1 @@
__pycache__
-*/myenv

View File

@@ -1,9 +0,0 @@
FROM python:3.12-slim
WORKDIR /app
COPY . .
RUN pip3 install -r requirements.txt
EXPOSE 80
ENTRYPOINT ["streamlit", "run", "frontend/exploration.py", "--server.port=80", "--server.address=0.0.0.0", "--server.baseUrlPath=/containers/bastienollier-miner"]

View File

View File

@@ -1,45 +0,0 @@
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder


def perform_classification(data, data_name, target_name, test_size):
    X = data[data_name]
    y = data[target_name]

    label_encoders = {}
    for column in X.select_dtypes(include=['object']).columns:
        le = LabelEncoder()
        X[column] = le.fit_transform(X[column])
        label_encoders[column] = le

    if y.dtype == 'object':
        le = LabelEncoder()
        y = le.fit_transform(y)
        label_encoders[target_name] = le
    else:
        if y.nunique() > 10:
            raise ValueError("The target variable seems to be continuous. Please select a categorical target for classification.")

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)
    model = LogisticRegression()
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    return model, label_encoders, accuracy


def make_prediction(model, label_encoders, data_name, target_name, input_values):
    X_new = []
    for feature, value in zip(data_name, input_values):
        if feature in label_encoders:
            value = label_encoders[feature].transform([value])[0]
        X_new.append(value)
    prediction = model.predict([X_new])
    if target_name in label_encoders:
        prediction = label_encoders[target_name].inverse_transform(prediction)
    return prediction[0]
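
Note — a minimal sketch of how these removed helpers could be driven, mirroring the sys.path import the deleted prediction page used; the toy DataFrame and its columns are hypothetical, and the tiny split is only meant to illustrate the call signatures:

import pandas as pd
from classification_strategy import perform_classification, make_prediction  # assumes backend/ is on sys.path

# Hypothetical toy data: one numeric feature, one categorical feature, a categorical target
df = pd.DataFrame({
    "age": [22, 35, 58, 41, 19, 50, 33, 27, 61, 45, 38, 24],
    "city": ["Lyon", "Paris", "Lyon", "Nice", "Paris", "Nice",
             "Lyon", "Paris", "Nice", "Lyon", "Paris", "Nice"],
    "segment": ["A", "B", "A", "B", "A", "B", "A", "B", "A", "B", "A", "B"],
})

# Fits a LogisticRegression on an 80/20 split; object-typed columns are label-encoded internally
model, encoders, accuracy = perform_classification(df, ["age", "city"], "segment", test_size=0.2)
print(f"test accuracy: {accuracy:.2f}")

# Categorical inputs must use classes seen during training
print(make_prediction(model, encoders, ["age", "city"], "segment", [30, "Paris"]))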

View File

@@ -1,16 +0,0 @@
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN


def perform_dbscan_clustering(data, data_name, eps, min_samples):
    x = data[data_name].to_numpy()
    dbscan = DBSCAN(eps=eps, min_samples=min_samples)
    y_dbscan = dbscan.fit_predict(x)
    fig = plt.figure()
    if len(data_name) == 2:
        ax = fig.add_subplot(projection='rectilinear')
        plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis")
    else:
        ax = fig.add_subplot(projection='3d')
        ax.scatter(x[:, 0], x[:, 1], x[:, 2], c=y_dbscan, s=50, cmap="viridis")
    return fig
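
Note — a minimal usage sketch of this removed helper, assuming the backend module is importable; the two numeric columns are made up. DBSCAN labels low-density points -1 (noise), and the scatter colours simply reflect fit_predict's output:

import pandas as pd
from dbscan_strategy import perform_dbscan_clustering  # assumes backend/ is on sys.path

# Hypothetical data: two tight groups plus one isolated point (expected to be labelled -1)
df = pd.DataFrame({
    "x1": [0.0, 0.1, 0.2, 5.0, 5.1, 5.2, 9.0],
    "x2": [0.0, 0.1, 0.0, 5.0, 5.2, 5.1, 9.0],
})
fig = perform_dbscan_clustering(df, ["x1", "x2"], eps=0.5, min_samples=2)
fig.savefig("dbscan_clusters.png")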

View File

@@ -1,20 +0,0 @@
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans


def perform_kmeans_clustering(data, data_name, n_clusters, n_init, max_iter):
    x = data[data_name].to_numpy()
    kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111)
    y_kmeans = kmeans.fit_predict(x)
    fig = plt.figure()
    if len(data_name) == 2:
        ax = fig.add_subplot(projection='rectilinear')
        plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis")
        centers = kmeans.cluster_centers_
        plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X")
    else:
        ax = fig.add_subplot(projection='3d')
        ax.scatter(x[:, 0], x[:, 1], x[:, 2], c=y_kmeans, s=50, cmap="viridis")
        centers = kmeans.cluster_centers_
        ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], c="black", s=200, marker="X")
    return fig
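
Note — likewise, a hedged sketch of calling the removed k-means helper with three invented numeric columns, which exercises the 3D branch; the black crosses come from kmeans.cluster_centers_:

import pandas as pd
from kmeans_strategy import perform_kmeans_clustering  # assumes backend/ is on sys.path

# Hypothetical data: selecting three columns triggers the 3D plot
df = pd.DataFrame({
    "a": [1.0, 1.1, 8.0, 8.2, 4.0, 4.1],
    "b": [0.5, 0.4, 7.5, 7.7, 3.5, 3.6],
    "c": [2.0, 2.1, 6.0, 6.2, 9.0, 9.1],
})
fig = perform_kmeans_clustering(df, ["a", "b", "c"], n_clusters=3, n_init=10, max_iter=300)
fig.savefig("kmeans_clusters.png")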

View File

@@ -1,18 +0,0 @@
from sklearn.linear_model import LinearRegression


def perform_regression(data, data_name, target_name):
    X = data[data_name]
    y = data[target_name]

    if not isinstance(y.iloc[0], (int, float)):
        raise ValueError("The target variable should be numeric (continuous) for regression.")

    model = LinearRegression()
    model.fit(X, y)
    return model


def make_prediction(model, feature_names, input_values):
    prediction = model.predict([input_values])
    return prediction[0]
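
Note — a minimal sketch of the removed regression helpers under the same import assumption; the x/y columns are invented and chosen so the fitted line roughly recovers y ≈ 2x + 1:

import pandas as pd
from regression_strategy import perform_regression, make_prediction  # assumes backend/ is on sys.path

# Hypothetical numeric data following y ≈ 2x + 1
df = pd.DataFrame({
    "x": [1.0, 2.0, 3.0, 4.0, 5.0],
    "y": [3.1, 4.9, 7.2, 9.0, 11.1],
})
model = perform_regression(df, ["x"], "y")
print(make_prediction(model, ["x"], [6.0]))  # extrapolates to x = 6, expect roughly 13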

View File

@@ -1,16 +0,0 @@
import matplotlib.pyplot as plt
import seaborn as sns


def plot_histogram(data, column):
    fig, ax = plt.subplots()
    ax.hist(data[column].dropna(), bins=20, edgecolor='k')
    ax.set_title(f"Histogram of {column}")
    ax.set_xlabel(column)
    ax.set_ylabel("Frequency")
    return fig


def plot_boxplot(data, column):
    fig, ax = plt.subplots()
    sns.boxplot(data=data, x=column, ax=ax)
    ax.set_title(f"Boxplot of {column}")
    return fig
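
Note — a short usage sketch for the removed plotting helpers, again assuming the module is importable; the price column is fabricated, and each helper returns a matplotlib Figure the caller can pass to st.pyplot or save:

import pandas as pd
from visualization_strategy import plot_histogram, plot_boxplot  # assumes backend/ is on sys.path

# Hypothetical numeric column with one outlier to make the boxplot interesting
df = pd.DataFrame({"price": [10, 12, 13, 13, 14, 15, 20, 90]})
plot_histogram(df, "price").savefig("price_hist.png")
plot_boxplot(df, "price").savefig("price_box.png")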

View File

View File

@@ -1,6 +1,5 @@
import pandas as pd
import streamlit as st
-import codecs

st.set_page_config(
    page_title="Project Miner",
@@ -10,13 +9,10 @@ st.set_page_config(
st.title("Home")

### Exploration
-uploaded_file = st.file_uploader("Upload your CSV file", type=["csv", "tsv"])
-separator = st.selectbox("Separator", [",", ";", "\\t"])
-separator = codecs.getdecoder("unicode_escape")(separator)[0]
-has_header = st.checkbox("Has header", value=True)
+uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"])

if uploaded_file is not None:
-    st.session_state.data = pd.read_csv(uploaded_file, sep=separator, header=0 if has_header else 1)
+    st.session_state.data = pd.read_csv(uploaded_file)
    st.session_state.original_data = st.session_state.data
    st.success("File loaded successfully!")

View File

@@ -1,7 +1,6 @@
from abc import ABC, abstractmethod
from pandas import DataFrame, Series
from pandas.api.types import is_numeric_dtype
-from sklearn.neighbors import KNeighborsClassifier
from typing import Any, Union

class DataFrameFunction(ABC):
@@ -19,14 +18,11 @@ class MVStrategy(DataFrameFunction):
"""A way to handle missing values in a dataframe.""" """A way to handle missing values in a dataframe."""
@staticmethod @staticmethod
def list_available(df: DataFrame, label: str, series: Series) -> list['MVStrategy']: def list_available(df: DataFrame, series: Series) -> list['MVStrategy']:
"""Get all the strategies that can be used.""" """Get all the strategies that can be used."""
choices = [DropStrategy(), ModeStrategy()] choices = [DropStrategy(), ModeStrategy()]
if is_numeric_dtype(series): if is_numeric_dtype(series):
choices.extend((MeanStrategy(), MedianStrategy(), LinearRegressionStrategy())) choices.extend((MeanStrategy(), MedianStrategy(), LinearRegressionStrategy()))
other_columns = df.select_dtypes(include="number").drop(label, axis=1).columns.to_list()
if len(other_columns):
choices.append(KNNStrategy(other_columns))
return choices return choices
@@ -101,43 +97,6 @@ class LinearRegressionStrategy(MVStrategy):
return "Use linear regression" return "Use linear regression"
class KNNStrategy(MVStrategy):
def __init__(self, training_features: list[str]):
self.available_features = training_features
self.training_features = training_features
self.n_neighbors = 3
def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
# Remove any training column that have any missing values
usable_data = df.dropna(subset=self.training_features)
# Select columns to impute from
train_data = usable_data.dropna(subset=label)
# Create train dataframe
x_train = train_data.drop(label, axis=1)
y_train = train_data[label]
reg = KNeighborsClassifier(self.n_neighbors).fit(x_train, y_train)
# Create test dataframe
test_data = usable_data[usable_data[label].isnull()]
if test_data.empty:
return df
x_test = test_data.drop(label, axis=1)
predicted = reg.predict(x_test)
# Fill with predicated values and patch the original data
usable_data[label].fillna(Series(predicted), inplace=True)
df.fillna(usable_data, inplace=True)
return df
def count_max(self, df: DataFrame, label: str) -> int:
usable_data = df.dropna(subset=self.training_features)
return usable_data[label].count()
def __str__(self) -> str:
return "kNN"
class KeepStrategy(ScalingStrategy): class KeepStrategy(ScalingStrategy):
#@typing.override #@typing.override
def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
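
Note — a self-contained, hedged sketch of the kNN-imputation idea behind the removed KNNStrategy (fit a KNeighborsClassifier on rows where the target column is present, then predict the missing entries); the DataFrame and column names are invented and this is not the class's exact code path:

import pandas as pd
from sklearn.neighbors import KNeighborsClassifier

# Hypothetical frame: "category" has gaps, the numeric training columns are complete
df = pd.DataFrame({
    "height": [170, 165, 180, 175, 160, 172],
    "weight": [65, 58, 82, 74, 55, 70],
    "category": ["M", "S", None, "M", "S", None],
})
training_features = ["height", "weight"]

train = df.dropna(subset=["category"])
missing = df[df["category"].isna()]

model = KNeighborsClassifier(n_neighbors=3).fit(train[training_features], train["category"])
df.loc[missing.index, "category"] = model.predict(missing[training_features])
print(df)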

View File

@@ -0,0 +1,44 @@
import streamlit as st
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt

st.header("Clustering: kmeans")

if "data" in st.session_state:
    data = st.session_state.data

    with st.form("my_form"):
        row1 = st.columns([1,1,1])
        n_clusters = row1[0].selectbox("Number of clusters", range(1,data.shape[0]))
        data_name = row1[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=3)
        n_init = row1[2].number_input("n_init",step=1,min_value=1)

        row2 = st.columns([1,1])
        max_iter = row1[0].number_input("max_iter",step=1,min_value=1)

        st.form_submit_button("launch")

    if len(data_name) >= 2 and len(data_name) <=3:
        x = data[data_name].to_numpy()

        kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111)
        y_kmeans = kmeans.fit_predict(x)

        fig = plt.figure()
        if len(data_name) == 2:
            ax = fig.add_subplot(projection='rectilinear')
            plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis")
            centers = kmeans.cluster_centers_
            plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X")
        else:
            ax = fig.add_subplot(projection='3d')
            ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_kmeans, s=50, cmap="viridis")
            centers = kmeans.cluster_centers_
            ax.scatter(centers[:, 0], centers[:, 1],centers[:, 2], c="black", s=200, marker="X")
        st.pyplot(fig)
else:
    st.error("file not loaded")

View File

@@ -1,22 +1,35 @@
import streamlit as st
-import sys
-import os
-
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
-from dbscan_strategy import perform_dbscan_clustering
-
-st.header("Clustering: DBSCAN")
+import matplotlib.pyplot as plt
+from sklearn.cluster import DBSCAN
+
+st.header("Clustering: dbscan")

if "data" in st.session_state:
    data = st.session_state.data

-    with st.form("dbscan_form"):
+    with st.form("my_form"):
        data_name = st.multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3)
        eps = st.slider("eps", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
        min_samples = st.number_input("min_samples", step=1, min_value=1, value=5)
-        submitted = st.form_submit_button("Launch")
+        st.form_submit_button("launch")

-    if submitted and 2 <= len(data_name) <= 3:
-        fig = perform_dbscan_clustering(data, data_name, eps, min_samples)
-        st.pyplot(fig)
+    if len(data_name) >= 2 and len(data_name) <=3:
+        x = data[data_name].to_numpy()
+
+        dbscan = DBSCAN(eps=eps, min_samples=min_samples)
+        y_dbscan = dbscan.fit_predict(x)
+
+        fig = plt.figure()
+        if len(data_name) == 2:
+            ax = fig.add_subplot(projection='rectilinear')
+            plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis")
+        else:
+            ax = fig.add_subplot(projection='3d')
+            ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_dbscan, s=50, cmap="viridis")
+        st.pyplot(fig)
else:
-    st.error("File not loaded")
+    st.error("file not loaded")

View File

@@ -1,26 +0,0 @@
import streamlit as st
import sys
import os

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from kmeans_strategy import perform_kmeans_clustering

st.header("Clustering: KMeans")

if "data" in st.session_state:
    data = st.session_state.data

    with st.form("kmeans_form"):
        row1 = st.columns([1, 1, 1])
        n_clusters = row1[0].selectbox("Number of clusters", range(1, data.shape[0]))
        data_name = row1[1].multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3)
        n_init = row1[2].number_input("n_init", step=1, min_value=1)
        row2 = st.columns([1, 1])
        max_iter = row2[0].number_input("max_iter", step=1, min_value=1)
        submitted = st.form_submit_button("Launch")

    if submitted and 2 <= len(data_name) <= 3:
        fig = perform_kmeans_clustering(data, data_name, n_clusters, n_init, max_iter)
        st.pyplot(fig)
else:
    st.error("File not loaded")

View File

@@ -1,8 +1,5 @@
import streamlit as st
-import sys
-import os
-
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
-from norm_strategy import MVStrategy, ScalingStrategy, KNNStrategy
+from normstrategy import MVStrategy, ScalingStrategy

if "data" in st.session_state:
    data = st.session_state.original_data
@@ -11,16 +8,13 @@ if "data" in st.session_state:
    for column, series in data.items():
        col1, col2 = st.columns(2)
        missing_count = series.isna().sum()
-        choices = MVStrategy.list_available(data, column, series)
+        choices = MVStrategy.list_available(data, series)
        option = col1.selectbox(
            f"Missing values of {column} ({missing_count})",
            choices,
            index=1,
            key=f"mv-{column}",
        )
-        if isinstance(option, KNNStrategy):
-            option.training_features = st.multiselect("Training columns", option.training_features, default=option.available_features, key=f"cols-{column}")
-            option.n_neighbors = st.number_input("Number of neighbors", min_value=1, max_value=option.count_max(data, column), value=option.n_neighbors, key=f"neighbors-{column}")
        # Always re-get the series to avoid reusing an invalidated series pointer
        data = option.apply(data, column, data[column])

View File

@@ -1,48 +0,0 @@
import streamlit as st
import sys
import os

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from classification_strategy import perform_classification, make_prediction

st.header("Prediction: Classification")

if "data" in st.session_state:
    data = st.session_state.data

    with st.form("classification_form"):
        st.subheader("Classification Parameters")
        data_name = st.multiselect("Features", data.columns, key="classification_features")
        target_name = st.selectbox("Target", data.columns, key="classification_target")
        test_size = st.slider("Test Size", min_value=0.1, max_value=0.5, value=0.2, step=0.1, key="classification_test_size")
        submitted = st.form_submit_button('Train and Predict')

    if submitted and data_name and target_name:
        try:
            model, label_encoders, accuracy = perform_classification(data, data_name, target_name, test_size)
            st.session_state.classification_model = model
            st.session_state.classification_label_encoders = label_encoders
            st.session_state.classification_accuracy = accuracy
            st.session_state.classification_features_selected = data_name
            st.session_state.classification_target_selected = target_name
        except ValueError as e:
            st.error(e)

    if "classification_model" in st.session_state:
        st.subheader("Model Accuracy")
        st.write(f"Accuracy on test data: {st.session_state.classification_accuracy:.2f}")

        st.subheader("Enter values for prediction")
        input_values = []
        for feature in st.session_state.classification_features_selected:
            if feature in st.session_state.classification_label_encoders:
                values = list(st.session_state.classification_label_encoders[feature].classes_)
                value = st.selectbox(f"Value for {feature}", values, key=f"classification_input_{feature}")
            else:
                value = st.number_input(f"Value for {feature}", value=0.0, key=f"classification_input_{feature}")
            input_values.append(value)

        prediction = make_prediction(st.session_state.classification_model, st.session_state.classification_label_encoders, st.session_state.classification_features_selected, st.session_state.classification_target_selected, input_values)
        st.write("Prediction:", prediction)
else:
    st.error("File not loaded")

View File

@@ -1,34 +0,0 @@
import streamlit as st
import sys
import os

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from regression_strategy import perform_regression, make_prediction

st.header("Prediction: Regression")

if "data" in st.session_state:
    data = st.session_state.data

    with st.form("regression_form"):
        st.subheader("Linear Regression Parameters")
        data_name = st.multiselect("Features", data.select_dtypes(include="number").columns, key="regression_features")
        target_name = st.selectbox("Target", data.select_dtypes(include="number").columns, key="regression_target")
        submitted = st.form_submit_button('Train and Predict')

    if submitted and data_name and target_name:
        try:
            model = perform_regression(data, data_name, target_name)
            st.session_state.regression_model = model
            st.session_state.regression_features_selected = data_name
            st.session_state.regression_target_selected = target_name
        except ValueError as e:
            st.error(e)

    if "regression_model" in st.session_state:
        st.subheader("Enter values for prediction")
        input_values = [st.number_input(f"Value for {feature}", value=0.0, key=f"regression_input_{feature}") for feature in st.session_state.regression_features_selected]
        prediction = make_prediction(st.session_state.regression_model, st.session_state.regression_features_selected, input_values)
        st.write("Prediction:", prediction)
else:
    st.error("File not loaded")

View File

@@ -1,25 +1,30 @@
import streamlit as st
-import sys
-import os
-
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
-from visualization_strategy import plot_histogram, plot_boxplot
+import matplotlib.pyplot as plt
+import seaborn as sns

st.header("Data Visualization")

if "data" in st.session_state:
    data = st.session_state.data

    st.subheader("Histogram")
    column_to_plot = st.selectbox("Select Column for Histogram", data.columns)
    if column_to_plot:
-        fig = plot_histogram(data, column_to_plot)
+        fig, ax = plt.subplots()
+        ax.hist(data[column_to_plot].dropna(), bins=20, edgecolor='k')
+        ax.set_title(f"Histogram of {column_to_plot}")
+        ax.set_xlabel(column_to_plot)
+        ax.set_ylabel("Frequency")
        st.pyplot(fig)

    st.subheader("Boxplot")
    dataNumeric = data.select_dtypes(include="number")
    column_to_plot = st.selectbox("Select Column for Boxplot", dataNumeric.columns)
    if column_to_plot:
-        fig = plot_boxplot(data, column_to_plot)
+        fig, ax = plt.subplots()
+        sns.boxplot(data=data, x=column_to_plot, ax=ax)
+        ax.set_title(f"Boxplot of {column_to_plot}")
        st.pyplot(fig)
else:
    st.error("file not loaded")

View File

@@ -1,6 +0,0 @@
matplotlib>=3.5.0
pandas>=1.5.0
seaborn>=0.12.0
scikit-learn>=0.23.0
streamlit>=1.35.0
ruff>=0.4.8