Compare commits: knn ... separation (14 commits)

Commits:
cc17a9323d
7dafa78bc4
15e1674cb2
4d82767c68
9cb0d90eb1
3eac3f6b8d
c87308cc21
d4aeb87f75
3c5f6849f8
96d390c749
089cc66042
2d1c867bed
a914c3f8f9
70641ebca4
.drone.yml (new file, 44 lines)
@@ -0,0 +1,44 @@
kind: pipeline
name: default
type: docker

trigger:
  event:
    - push

steps:
  - name: lint
    image: python:3.12
    commands:
      - pip install --root-user-action=ignore -r requirements.txt
      - ruff check .

  - name: docker-image
    image: plugins/docker
    settings:
      dockerfile: Dockerfile
      registry: hub.codefirst.iut.uca.fr
      repo: hub.codefirst.iut.uca.fr/bastien.ollier/miner
      username:
        from_secret: REGISTRY_USER
      password:
        from_secret: REGISTRY_PASSWORD
      cache_from:
        - hub.codefirst.iut.uca.fr/bastien.ollier/miner:latest
    depends_on: [ lint ]

  - name: deploy-miner
    image: hub.codefirst.iut.uca.fr/clement.freville2/codefirst-dockerproxy-clientdrone:latest
    settings:
      image: hub.codefirst.iut.uca.fr/bastien.ollier/miner:latest
      container: miner
      command: create
      overwrite: true
      admins: bastienollier,clementfreville2,hugopradier2
      environment:
        DRONE_REPO_OWNER: bastien.ollier
    depends_on: [ docker-image ]
    when:
      branch:
        - main
        - ci/*

.gitignore (vendored, +1 line)
@@ -1 +1,2 @@
 __pycache__
+*/myenv

Dockerfile (new file, 9 lines)
@@ -0,0 +1,9 @@
FROM python:3.12-slim

WORKDIR /app

COPY . .
RUN pip3 install -r requirements.txt

EXPOSE 80
ENTRYPOINT ["streamlit", "run", "frontend/exploration.py", "--server.port=80", "--server.address=0.0.0.0", "--server.baseUrlPath=/containers/bastienollier-miner"]

backend/__init__.py (new empty file)

backend/classification_strategy.py (new file, 45 lines)
@@ -0,0 +1,45 @@
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder


def perform_classification(data, data_name, target_name, test_size):
    X = data[data_name]
    y = data[target_name]

    label_encoders = {}
    for column in X.select_dtypes(include=['object']).columns:
        le = LabelEncoder()
        X[column] = le.fit_transform(X[column])
        label_encoders[column] = le

    if y.dtype == 'object':
        le = LabelEncoder()
        y = le.fit_transform(y)
        label_encoders[target_name] = le
    else:
        if y.nunique() > 10:
            raise ValueError("The target variable seems to be continuous. Please select a categorical target for classification.")

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)

    model = LogisticRegression()
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)

    return model, label_encoders, accuracy


def make_prediction(model, label_encoders, data_name, target_name, input_values):
    X_new = []
    for feature, value in zip(data_name, input_values):
        if feature in label_encoders:
            value = label_encoders[feature].transform([value])[0]
        X_new.append(value)

    prediction = model.predict([X_new])

    if target_name in label_encoders:
        prediction = label_encoders[target_name].inverse_transform(prediction)

    return prediction[0]
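
A minimal usage sketch for these two helpers (the DataFrame and column names below are made up for illustration, and backend/ is assumed to be on sys.path the way the frontend pages arrange it):

import pandas as pd
from classification_strategy import perform_classification, make_prediction

# Hypothetical toy data: two features, one categorical target.
df = pd.DataFrame({
    "height": [1.2, 3.4, 2.2, 0.9, 2.8, 1.1],
    "color": ["red", "blue", "red", "blue", "red", "blue"],
    "kind": ["a", "b", "a", "b", "b", "a"],
})

model, encoders, accuracy = perform_classification(df, ["height", "color"], "kind", test_size=0.25)
print(f"test accuracy: {accuracy:.2f}")

# Categorical inputs are passed as raw strings; the stored encoders handle the mapping.
print(make_prediction(model, encoders, ["height", "color"], "kind", [2.0, "red"]))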

backend/dbscan_strategy.py (new file, 16 lines)
@@ -0,0 +1,16 @@
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN


def perform_dbscan_clustering(data, data_name, eps, min_samples):
    x = data[data_name].to_numpy()
    dbscan = DBSCAN(eps=eps, min_samples=min_samples)
    y_dbscan = dbscan.fit_predict(x)

    fig = plt.figure()
    if len(data_name) == 2:
        ax = fig.add_subplot(projection='rectilinear')
        plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis")
    else:
        ax = fig.add_subplot(projection='3d')
        ax.scatter(x[:, 0], x[:, 1], x[:, 2], c=y_dbscan, s=50, cmap="viridis")
    return fig
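
A quick sketch of how this helper could be exercised outside Streamlit (random data and parameter values are illustrative; backend/ is assumed to be on sys.path):

import numpy as np
import pandas as pd
from dbscan_strategy import perform_dbscan_clustering

rng = np.random.default_rng(0)
df = pd.DataFrame(rng.normal(size=(200, 2)), columns=["x1", "x2"])

# Two selected columns take the 2D branch; three columns would use the 3D projection.
fig = perform_dbscan_clustering(df, ["x1", "x2"], eps=0.5, min_samples=5)
fig.savefig("dbscan_clusters.png")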

backend/kmeans_strategy.py (new file, 20 lines)
@@ -0,0 +1,20 @@
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans


def perform_kmeans_clustering(data, data_name, n_clusters, n_init, max_iter):
    x = data[data_name].to_numpy()
    kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111)
    y_kmeans = kmeans.fit_predict(x)

    fig = plt.figure()
    if len(data_name) == 2:
        ax = fig.add_subplot(projection='rectilinear')
        plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis")
        centers = kmeans.cluster_centers_
        plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X")
    else:
        ax = fig.add_subplot(projection='3d')
        ax.scatter(x[:, 0], x[:, 1], x[:, 2], c=y_kmeans, s=50, cmap="viridis")
        centers = kmeans.cluster_centers_
        ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], c="black", s=200, marker="X")
    return fig
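
The same kind of sketch for the KMeans helper, this time with three columns so the 3D branch and the black "X" cluster centres are exercised (data and parameters are illustrative; backend/ assumed on sys.path):

import numpy as np
import pandas as pd
from kmeans_strategy import perform_kmeans_clustering

rng = np.random.default_rng(1)
df = pd.DataFrame(rng.normal(size=(150, 3)), columns=["x1", "x2", "x3"])

fig = perform_kmeans_clustering(df, ["x1", "x2", "x3"], n_clusters=3, n_init=10, max_iter=300)
fig.savefig("kmeans_clusters.png")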

@@ -130,6 +130,10 @@ class KNNStrategy(MVStrategy):
         df.fillna(usable_data, inplace=True)
         return df

+    def count_max(self, df: DataFrame, label: str) -> int:
+        usable_data = df.dropna(subset=self.training_features)
+        return usable_data[label].count()
+
     def __str__(self) -> str:
         return "kNN"


backend/regression_strategy.py (new file, 18 lines)
@@ -0,0 +1,18 @@
from sklearn.linear_model import LinearRegression


def perform_regression(data, data_name, target_name):
    X = data[data_name]
    y = data[target_name]

    if not isinstance(y.iloc[0], (int, float)):
        raise ValueError("The target variable should be numeric (continuous) for regression.")

    model = LinearRegression()
    model.fit(X, y)

    return model


def make_prediction(model, feature_names, input_values):
    prediction = model.predict([input_values])

    return prediction[0]
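
A minimal usage sketch (made-up numeric data; backend/ assumed on sys.path). The target below follows y = 2*x1 + 3*x2, so the prediction for (5.0, 2.5) comes out around 17.5:

import pandas as pd
from regression_strategy import perform_regression, make_prediction

df = pd.DataFrame({
    "x1": [1.0, 2.0, 3.0, 4.0],
    "x2": [0.5, 1.0, 1.5, 2.0],
    "y":  [3.5, 7.0, 10.5, 14.0],
})

model = perform_regression(df, ["x1", "x2"], "y")
print(make_prediction(model, ["x1", "x2"], [5.0, 2.5]))  # ~17.5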

backend/visualization_strategy.py (new file, 16 lines)
@@ -0,0 +1,16 @@
import matplotlib.pyplot as plt
import seaborn as sns


def plot_histogram(data, column):
    fig, ax = plt.subplots()
    ax.hist(data[column].dropna(), bins=20, edgecolor='k')
    ax.set_title(f"Histogram of {column}")
    ax.set_xlabel(column)
    ax.set_ylabel("Frequency")
    return fig


def plot_boxplot(data, column):
    fig, ax = plt.subplots()
    sns.boxplot(data=data, x=column, ax=ax)
    ax.set_title(f"Boxplot of {column}")
    return fig
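
Both helpers return Matplotlib figures, so they work with st.pyplot or can be saved to disk; a small sketch with a made-up column:

import pandas as pd
from visualization_strategy import plot_histogram, plot_boxplot  # assumes backend/ is on sys.path

df = pd.DataFrame({"price": [10, 12, 9, 30, 11, 13, 28, 10]})

plot_histogram(df, "price").savefig("price_hist.png")
plot_boxplot(df, "price").savefig("price_box.png")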

frontend/__init__.py (new empty file)

@@ -1,5 +1,6 @@
 import pandas as pd
 import streamlit as st
+import codecs

 st.set_page_config(
     page_title="Project Miner",
@@ -9,10 +10,13 @@ st.set_page_config(
 st.title("Home")

 ### Exploration
-uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"])
+uploaded_file = st.file_uploader("Upload your CSV file", type=["csv", "tsv"])
+separator = st.selectbox("Separator", [",", ";", "\\t"])
+separator = codecs.getdecoder("unicode_escape")(separator)[0]
+has_header = st.checkbox("Has header", value=True)

 if uploaded_file is not None:
-    st.session_state.data = pd.read_csv(uploaded_file)
+    st.session_state.data = pd.read_csv(uploaded_file, sep=separator, header=0 if has_header else 1)
     st.session_state.original_data = st.session_state.data
     st.success("File loaded successfully!")

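
The separator widget returns the literal two-character string "\\t" rather than a real tab; the unicode_escape decoder converts it before it is handed to pandas. A small sketch of that conversion:

import codecs

raw = "\\t"                                         # literal backslash + t, as returned by the selectbox
sep = codecs.getdecoder("unicode_escape")(raw)[0]   # decoded to a real tab character
assert sep == "\t"

Note that pandas conventionally takes header=None (not header=1) for a file without a header row; header=1 treats the file's second line as the header, so the header=0 if has_header else 1 expression above may not behave as intended for header-less files.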

@@ -1,35 +1,22 @@
 import streamlit as st
-import matplotlib.pyplot as plt
-from sklearn.cluster import DBSCAN
-
-st.header("Clustering: dbscan")
+import sys
+import os
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
+from dbscan_strategy import perform_dbscan_clustering
+
+st.header("Clustering: DBSCAN")

 if "data" in st.session_state:
     data = st.session_state.data

-    with st.form("my_form"):
+    with st.form("dbscan_form"):
         data_name = st.multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3)
         eps = st.slider("eps", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
         min_samples = st.number_input("min_samples", step=1, min_value=1, value=5)
-        st.form_submit_button("launch")
+        submitted = st.form_submit_button("Launch")

-    if len(data_name) >= 2 and len(data_name) <=3:
-        x = data[data_name].to_numpy()
-
-        dbscan = DBSCAN(eps=eps, min_samples=min_samples)
-        y_dbscan = dbscan.fit_predict(x)
-
-        fig = plt.figure()
-        if len(data_name) == 2:
-            ax = fig.add_subplot(projection='rectilinear')
-            plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis")
-        else:
-            ax = fig.add_subplot(projection='3d')
-            ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_dbscan, s=50, cmap="viridis")
+    if submitted and 2 <= len(data_name) <= 3:
+        fig = perform_dbscan_clustering(data, data_name, eps, min_samples)
         st.pyplot(fig)

-
-
 else:
-    st.error("file not loaded")
+    st.error("File not loaded")


@@ -1,44 +1,26 @@
 import streamlit as st
-from sklearn.cluster import KMeans
-import matplotlib.pyplot as plt
-
-st.header("Clustering: kmeans")
+import sys
+import os
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
+from kmeans_strategy import perform_kmeans_clustering
+
+st.header("Clustering: KMeans")

 if "data" in st.session_state:
     data = st.session_state.data

-    with st.form("my_form"):
-        row1 = st.columns([1,1,1])
-        n_clusters = row1[0].selectbox("Number of clusters", range(1,data.shape[0]))
-        data_name = row1[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=3)
-        n_init = row1[2].number_input("n_init",step=1,min_value=1)
+    with st.form("kmeans_form"):
+        row1 = st.columns([1, 1, 1])
+        n_clusters = row1[0].selectbox("Number of clusters", range(1, data.shape[0]))
+        data_name = row1[1].multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3)
+        n_init = row1[2].number_input("n_init", step=1, min_value=1)

-        row2 = st.columns([1,1])
-        max_iter = row1[0].number_input("max_iter",step=1,min_value=1)
-
-
-        st.form_submit_button("launch")
-
-    if len(data_name) >= 2 and len(data_name) <=3:
-        x = data[data_name].to_numpy()
-
-        kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111)
-        y_kmeans = kmeans.fit_predict(x)
-
-        fig = plt.figure()
-        if len(data_name) == 2:
-            ax = fig.add_subplot(projection='rectilinear')
-            plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis")
-            centers = kmeans.cluster_centers_
-            plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X")
-        else:
-            ax = fig.add_subplot(projection='3d')
-
-            ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_kmeans, s=50, cmap="viridis")
-            centers = kmeans.cluster_centers_
-            ax.scatter(centers[:, 0], centers[:, 1],centers[:, 2], c="black", s=200, marker="X")
+        row2 = st.columns([1, 1])
+        max_iter = row2[0].number_input("max_iter", step=1, min_value=1)
+        submitted = st.form_submit_button("Launch")
+
+    if submitted and 2 <= len(data_name) <= 3:
+        fig = perform_kmeans_clustering(data, data_name, n_clusters, n_init, max_iter)
         st.pyplot(fig)

 else:
-    st.error("file not loaded")
+    st.error("File not loaded")


@@ -1,5 +1,8 @@
 import streamlit as st
-from normstrategy import MVStrategy, ScalingStrategy, KNNStrategy
+import sys
+import os
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
+from norm_strategy import MVStrategy, ScalingStrategy, KNNStrategy

 if "data" in st.session_state:
     data = st.session_state.original_data
@@ -16,9 +19,8 @@ if "data" in st.session_state:
             key=f"mv-{column}",
         )
         if isinstance(option, KNNStrategy):
-            print(option.available_features)
             option.training_features = st.multiselect("Training columns", option.training_features, default=option.available_features, key=f"cols-{column}")
-            option.n_neighbors = st.number_input("Number of neighbors", min_value=1, value=option.n_neighbors, key=f"neighbors-{column}")
+            option.n_neighbors = st.number_input("Number of neighbors", min_value=1, max_value=option.count_max(data, column), value=option.n_neighbors, key=f"neighbors-{column}")
         # Always re-get the series to avoid reusing an invalidated series pointer
         data = option.apply(data, column, data[column])


frontend/pages/prediction_classification.py (new file, 48 lines)
@@ -0,0 +1,48 @@
import streamlit as st
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from classification_strategy import perform_classification, make_prediction


st.header("Prediction: Classification")

if "data" in st.session_state:
    data = st.session_state.data

    with st.form("classification_form"):
        st.subheader("Classification Parameters")
        data_name = st.multiselect("Features", data.columns, key="classification_features")
        target_name = st.selectbox("Target", data.columns, key="classification_target")
        test_size = st.slider("Test Size", min_value=0.1, max_value=0.5, value=0.2, step=0.1, key="classification_test_size")
        submitted = st.form_submit_button('Train and Predict')

    if submitted and data_name and target_name:
        try:
            model, label_encoders, accuracy = perform_classification(data, data_name, target_name, test_size)
            st.session_state.classification_model = model
            st.session_state.classification_label_encoders = label_encoders
            st.session_state.classification_accuracy = accuracy
            st.session_state.classification_features_selected = data_name
            st.session_state.classification_target_selected = target_name
        except ValueError as e:
            st.error(e)

    if "classification_model" in st.session_state:
        st.subheader("Model Accuracy")
        st.write(f"Accuracy on test data: {st.session_state.classification_accuracy:.2f}")

        st.subheader("Enter values for prediction")
        input_values = []
        for feature in st.session_state.classification_features_selected:
            if feature in st.session_state.classification_label_encoders:
                values = list(st.session_state.classification_label_encoders[feature].classes_)
                value = st.selectbox(f"Value for {feature}", values, key=f"classification_input_{feature}")
            else:
                value = st.number_input(f"Value for {feature}", value=0.0, key=f"classification_input_{feature}")
            input_values.append(value)

        prediction = make_prediction(st.session_state.classification_model, st.session_state.classification_label_encoders, st.session_state.classification_features_selected, st.session_state.classification_target_selected, input_values)

        st.write("Prediction:", prediction)
else:
    st.error("File not loaded")
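
Streamlit re-runs the whole page script on every widget interaction, so the fitted model is parked in st.session_state to survive the rerun triggered by the prediction inputs. The pattern, reduced to its core (names are illustrative):

import streamlit as st

if st.button("Train"):
    st.session_state.model = "fitted model would go here"  # persists across reruns

if "model" in st.session_state:
    st.write("Model available:", st.session_state.model)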

frontend/pages/prediction_regression.py (new file, 34 lines)
@@ -0,0 +1,34 @@
import streamlit as st
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from regression_strategy import perform_regression, make_prediction


st.header("Prediction: Regression")

if "data" in st.session_state:
    data = st.session_state.data

    with st.form("regression_form"):
        st.subheader("Linear Regression Parameters")
        data_name = st.multiselect("Features", data.select_dtypes(include="number").columns, key="regression_features")
        target_name = st.selectbox("Target", data.select_dtypes(include="number").columns, key="regression_target")
        submitted = st.form_submit_button('Train and Predict')

    if submitted and data_name and target_name:
        try:
            model = perform_regression(data, data_name, target_name)
            st.session_state.regression_model = model
            st.session_state.regression_features_selected = data_name
            st.session_state.regression_target_selected = target_name
        except ValueError as e:
            st.error(e)

    if "regression_model" in st.session_state:
        st.subheader("Enter values for prediction")
        input_values = [st.number_input(f"Value for {feature}", value=0.0, key=f"regression_input_{feature}") for feature in st.session_state.regression_features_selected]
        prediction = make_prediction(st.session_state.regression_model, st.session_state.regression_features_selected, input_values)

        st.write("Prediction:", prediction)
else:
    st.error("File not loaded")

@@ -1,30 +1,25 @@
 import streamlit as st
-import matplotlib.pyplot as plt
-import seaborn as sns
+import sys
+import os
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
+from visualization_strategy import plot_histogram, plot_boxplot

 st.header("Data Visualization")


 if "data" in st.session_state:
     data = st.session_state.data

     st.subheader("Histogram")
     column_to_plot = st.selectbox("Select Column for Histogram", data.columns)
     if column_to_plot:
-        fig, ax = plt.subplots()
-        ax.hist(data[column_to_plot].dropna(), bins=20, edgecolor='k')
-        ax.set_title(f"Histogram of {column_to_plot}")
-        ax.set_xlabel(column_to_plot)
-        ax.set_ylabel("Frequency")
+        fig = plot_histogram(data, column_to_plot)
         st.pyplot(fig)

     st.subheader("Boxplot")
     dataNumeric = data.select_dtypes(include="number")
     column_to_plot = st.selectbox("Select Column for Boxplot", dataNumeric.columns)
     if column_to_plot:
-        fig, ax = plt.subplots()
-        sns.boxplot(data=data, x=column_to_plot, ax=ax)
-        ax.set_title(f"Boxplot of {column_to_plot}")
+        fig = plot_boxplot(data, column_to_plot)
         st.pyplot(fig)
 else:
     st.error("file not loaded")

requirements.txt (new file, 6 lines)
@@ -0,0 +1,6 @@
matplotlib>=3.5.0
pandas>=1.5.0
seaborn>=0.12.0
scikit-learn>=0.23.0
streamlit>=1.35.0
ruff>=0.4.8