11 Commits

Author SHA1 Message Date
hugo.pradier2
cc17a9323d correctifs pour lint 2024-06-23 17:47:34 +02:00
hugo.pradier2
7dafa78bc4 fin separation front/back 2024-06-23 17:44:26 +02:00
hugo.pradier2
15e1674cb2 debut separation front/back 2024-06-21 17:22:20 +02:00
4d82767c68 Add SkLearn to requirements.txt 2024-06-21 16:59:51 +02:00
Bastien OLLIER
9cb0d90eb1 Add CI/CD (#9)
Co-authored-by: clfreville2 <clement.freville2@etu.uca.fr>
Co-authored-by: bastien ollier <bastien.ollier@etu.uca.fr>
Reviewed-on: https://codefirst.iut.uca.fr/git/clement.freville2/miner/pulls/9
Reviewed-by: Clément FRÉVILLE <clement.freville2@etu.uca.fr>
Co-authored-by: Bastien OLLIER <bastien.ollier@noreply.codefirst.iut.uca.fr>
Co-committed-by: Bastien OLLIER <bastien.ollier@noreply.codefirst.iut.uca.fr>
2024-06-21 16:53:00 +02:00
Bastien OLLIER
3eac3f6b8d Merge pull request 'Support multiple column delimiters' (#10) from csv-delimiters into main
Reviewed-on: https://codefirst.iut.uca.fr/git/clement.freville2/miner/pulls/10
2024-06-21 16:49:01 +02:00
c87308cc21 Support multiple column delimiters 2024-06-21 16:46:35 +02:00
d4aeb87f75 Limit the number of neighbors based on the dataframe 2024-06-21 16:09:30 +02:00
Hugo PRADIER
3c5f6849f8 Merge pull request 'Support kNN as an imputation method' (#8) from knn into main
Reviewed-on: https://codefirst.iut.uca.fr/git/clement.freville2/miner/pulls/8
2024-06-21 15:51:46 +02:00
cd0c85ea44 Support kNN as an imputation method 2024-06-21 15:45:33 +02:00
Hugo PRADIER
96d390c749 Merge pull request 'Ajout de la prédiction avec deux algos (un de prédiction et un de classification)' (#7) from prediction into main
Reviewed-on: https://codefirst.iut.uca.fr/git/clement.freville2/miner/pulls/7
Reviewed-by: Clément FRÉVILLE <clement.freville2@etu.uca.fr>
2024-06-21 14:56:28 +02:00
19 changed files with 315 additions and 137 deletions

44
.drone.yml Normal file
View File

@@ -0,0 +1,44 @@
# Drone CI pipeline: lint the Python sources, build the Docker image,
# then deploy the container through the CodeFirst docker proxy.
kind: pipeline
name: default
type: docker

trigger:
  event:
    - push

steps:
  # Static analysis gate: install dependencies and run ruff.
  - name: lint
    image: python:3.12
    commands:
      - pip install --root-user-action=ignore -r requirements.txt
      - ruff check .

  # Build and push the application image to the CodeFirst registry.
  - name: docker-image
    image: plugins/docker
    settings:
      dockerfile: Dockerfile
      registry: hub.codefirst.iut.uca.fr
      repo: hub.codefirst.iut.uca.fr/bastien.ollier/miner
      username:
        from_secret: REGISTRY_USER
      password:
        from_secret: REGISTRY_PASSWORD
      cache_from:
        - hub.codefirst.iut.uca.fr/bastien.ollier/miner:latest
    depends_on: [ lint ]

  # Deploy the freshly built image; only runs on main and ci/* branches.
  - name: deploy-miner
    image: hub.codefirst.iut.uca.fr/clement.freville2/codefirst-dockerproxy-clientdrone:latest
    settings:
      image: hub.codefirst.iut.uca.fr/bastien.ollier/miner:latest
      container: miner
      command: create
      overwrite: true
      admins: bastienollier,clementfreville2,hugopradier2
    environment:
      DRONE_REPO_OWNER: bastien.ollier
    depends_on: [ docker-image ]
    when:
      branch:
        - main
        - ci/*

2
.gitignore vendored
View File

@@ -1,2 +1,2 @@
__pycache__ __pycache__
.venv */myenv

9
Dockerfile Normal file
View File

@@ -0,0 +1,9 @@
# Container image for the Streamlit data-mining frontend.
FROM python:3.12-slim
# All sources (backend/, frontend/, requirements.txt) live under /app.
WORKDIR /app
COPY . .
RUN pip3 install -r requirements.txt
EXPOSE 80
# Serve the Streamlit app on port 80 under the CodeFirst container base path.
ENTRYPOINT ["streamlit", "run", "frontend/exploration.py", "--server.port=80", "--server.address=0.0.0.0", "--server.baseUrlPath=/containers/bastienollier-miner"]

0
backend/__init__.py Normal file
View File

View File

@@ -0,0 +1,45 @@
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
def perform_classification(data, data_name, target_name, test_size):
    """Train a logistic-regression classifier on ``data``.

    Parameters
    ----------
    data : pandas.DataFrame
        Source dataframe; it is not modified.
    data_name : list[str]
        Names of the feature columns.
    target_name : str
        Name of the target column.
    test_size : float
        Fraction of the rows held out to estimate accuracy.

    Returns
    -------
    tuple
        ``(model, label_encoders, accuracy)`` where ``label_encoders``
        maps each encoded column name (features and possibly the target)
        to its fitted ``LabelEncoder``.

    Raises
    ------
    ValueError
        If the target is numeric with more than 10 distinct values
        (it then looks continuous, i.e. a regression target).
    """
    # Copy so that the label encoding below does not write back into the
    # caller's dataframe (data[data_name] is a view, not a copy).
    X = data[data_name].copy()
    y = data[target_name]

    label_encoders = {}
    # Encode every non-numeric feature column to integer codes.
    for column in X.select_dtypes(include=['object']).columns:
        le = LabelEncoder()
        X[column] = le.fit_transform(X[column])
        label_encoders[column] = le

    if y.dtype == 'object':
        le = LabelEncoder()
        y = le.fit_transform(y)
        label_encoders[target_name] = le
    elif y.nunique() > 10:
        # Heuristic guard: many distinct numeric values suggest a
        # continuous target rather than class labels.
        raise ValueError("The target variable seems to be continuous. Please select a categorical target for classification.")

    # Fixed random_state keeps the train/test split reproducible.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)
    model = LogisticRegression()
    model.fit(X_train, y_train)
    accuracy = accuracy_score(y_test, model.predict(X_test))
    return model, label_encoders, accuracy
def make_prediction(model, label_encoders, data_name, target_name, input_values):
    """Predict the target class for a single observation.

    ``input_values`` are given in the same order as ``data_name``.
    Features that were label-encoded during training are encoded with
    their fitted encoder first; if the target itself was encoded, the
    numeric prediction is decoded back to the original label.
    """
    encoded_row = [
        label_encoders[feature].transform([value])[0] if feature in label_encoders else value
        for feature, value in zip(data_name, input_values)
    ]
    prediction = model.predict([encoded_row])
    if target_name in label_encoders:
        prediction = label_encoders[target_name].inverse_transform(prediction)
    return prediction[0]

View File

@@ -0,0 +1,16 @@
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
def perform_dbscan_clustering(data, data_name, eps, min_samples):
    """Run DBSCAN on the selected columns and return a scatter figure.

    Parameters
    ----------
    data : pandas.DataFrame
        Source dataframe.
    data_name : list[str]
        Two or three numeric column names; selects a 2-D or 3-D plot.
    eps, min_samples :
        Forwarded to :class:`sklearn.cluster.DBSCAN`.

    Returns
    -------
    matplotlib.figure.Figure
        Scatter plot coloured by cluster label (-1 marks noise points).
    """
    x = data[data_name].to_numpy()
    dbscan = DBSCAN(eps=eps, min_samples=min_samples)
    y_dbscan = dbscan.fit_predict(x)
    fig = plt.figure()
    if len(data_name) == 2:
        ax = fig.add_subplot(projection='rectilinear')
        # Draw through the axes object (not plt.scatter) so the points are
        # tied to this figure even if another figure is currently active.
        ax.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis")
    else:
        ax = fig.add_subplot(projection='3d')
        ax.scatter(x[:, 0], x[:, 1], x[:, 2], c=y_dbscan, s=50, cmap="viridis")
    return fig

View File

@@ -0,0 +1,20 @@
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
def perform_kmeans_clustering(data, data_name, n_clusters, n_init, max_iter):
    """Run k-means on the selected columns and return a scatter figure.

    Parameters
    ----------
    data : pandas.DataFrame
        Source dataframe.
    data_name : list[str]
        Two or three numeric column names; selects a 2-D or 3-D plot.
    n_clusters, n_init, max_iter :
        Forwarded to :class:`sklearn.cluster.KMeans`.

    Returns
    -------
    matplotlib.figure.Figure
        Points coloured by cluster, centroids drawn as large black X's.
    """
    x = data[data_name].to_numpy()
    # Fixed random_state makes the (random-init) clustering reproducible.
    kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111)
    y_kmeans = kmeans.fit_predict(x)
    centers = kmeans.cluster_centers_
    fig = plt.figure()
    if len(data_name) == 2:
        ax = fig.add_subplot(projection='rectilinear')
        # Draw through the axes object (not plt.scatter) so the points are
        # tied to this figure even if another figure is currently active.
        ax.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis")
        ax.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X")
    else:
        ax = fig.add_subplot(projection='3d')
        ax.scatter(x[:, 0], x[:, 1], x[:, 2], c=y_kmeans, s=50, cmap="viridis")
        ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], c="black", s=200, marker="X")
    return fig

View File

@@ -1,6 +1,7 @@
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from pandas import DataFrame, Series from pandas import DataFrame, Series
from pandas.api.types import is_numeric_dtype from pandas.api.types import is_numeric_dtype
from sklearn.neighbors import KNeighborsClassifier
from typing import Any, Union from typing import Any, Union
class DataFrameFunction(ABC): class DataFrameFunction(ABC):
@@ -18,11 +19,14 @@ class MVStrategy(DataFrameFunction):
"""A way to handle missing values in a dataframe.""" """A way to handle missing values in a dataframe."""
@staticmethod @staticmethod
def list_available(df: DataFrame, series: Series) -> list['MVStrategy']: def list_available(df: DataFrame, label: str, series: Series) -> list['MVStrategy']:
"""Get all the strategies that can be used.""" """Get all the strategies that can be used."""
choices = [DropStrategy(), ModeStrategy()] choices = [DropStrategy(), ModeStrategy()]
if is_numeric_dtype(series): if is_numeric_dtype(series):
choices.extend((MeanStrategy(), MedianStrategy(), LinearRegressionStrategy())) choices.extend((MeanStrategy(), MedianStrategy(), LinearRegressionStrategy()))
other_columns = df.select_dtypes(include="number").drop(label, axis=1).columns.to_list()
if len(other_columns):
choices.append(KNNStrategy(other_columns))
return choices return choices
@@ -97,6 +101,43 @@ class LinearRegressionStrategy(MVStrategy):
return "Use linear regression" return "Use linear regression"
class KNNStrategy(MVStrategy):
def __init__(self, training_features: list[str]):
self.available_features = training_features
self.training_features = training_features
self.n_neighbors = 3
def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:
# Remove any row that has a missing value in one of the training columns
usable_data = df.dropna(subset=self.training_features)
# Select columns to impute from
train_data = usable_data.dropna(subset=label)
# Create train dataframe
x_train = train_data.drop(label, axis=1)
y_train = train_data[label]
reg = KNeighborsClassifier(self.n_neighbors).fit(x_train, y_train)
# Create test dataframe
test_data = usable_data[usable_data[label].isnull()]
if test_data.empty:
return df
x_test = test_data.drop(label, axis=1)
predicted = reg.predict(x_test)
# Fill with predicted values and patch the original data
usable_data[label].fillna(Series(predicted), inplace=True)
df.fillna(usable_data, inplace=True)
return df
def count_max(self, df: DataFrame, label: str) -> int:
usable_data = df.dropna(subset=self.training_features)
return usable_data[label].count()
def __str__(self) -> str:
return "kNN"
class KeepStrategy(ScalingStrategy): class KeepStrategy(ScalingStrategy):
#@typing.override #@typing.override
def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame: def apply(self, df: DataFrame, label: str, series: Series) -> DataFrame:

View File

@@ -0,0 +1,18 @@
from sklearn.linear_model import LinearRegression
def perform_regression(data, data_name, target_name):
    """Fit an ordinary least-squares model predicting ``target_name``.

    Parameters
    ----------
    data : pandas.DataFrame
        Source dataframe.
    data_name : list[str]
        Names of the (numeric) feature columns.
    target_name : str
        Name of the numeric target column.

    Returns
    -------
    sklearn.linear_model.LinearRegression
        The fitted model.

    Raises
    ------
    ValueError
        If the target column is not numeric.
    """
    X = data[data_name]
    y = data[target_name]
    # Check the column dtype, not isinstance on the first value: numpy
    # integer scalars (e.g. int64) are NOT instances of Python int, so the
    # old isinstance(y.iloc[0], (int, float)) rejected valid int targets.
    if y.dtype.kind not in "biuf":
        raise ValueError("The target variable should be numeric (continuous) for regression.")
    model = LinearRegression()
    model.fit(X, y)
    return model
def make_prediction(model, feature_names, input_values):
    """Return the model's prediction for a single row of ``input_values``.

    ``feature_names`` is accepted for interface symmetry with the
    classification helper; the fitted model only needs the raw values.
    """
    return model.predict([input_values])[0]

View File

@@ -0,0 +1,16 @@
import matplotlib.pyplot as plt
import seaborn as sns
def plot_histogram(data, column):
    """Return a figure with a 20-bin histogram of ``data[column]``.

    Missing values are dropped before binning.
    """
    fig, ax = plt.subplots()
    ax.hist(data[column].dropna(), bins=20, edgecolor='k')
    # Single call setting title and both axis labels.
    ax.set(title=f"Histogram of {column}", xlabel=column, ylabel="Frequency")
    return fig
def plot_boxplot(data, column):
    """Return a figure with a seaborn boxplot of ``data[column]``."""
    fig = plt.figure()
    ax = fig.add_subplot()
    sns.boxplot(data=data, x=column, ax=ax)
    ax.set_title(f"Boxplot of {column}")
    return fig

0
frontend/__init__.py Normal file
View File

View File

@@ -1,5 +1,6 @@
import pandas as pd import pandas as pd
import streamlit as st import streamlit as st
import codecs
st.set_page_config( st.set_page_config(
page_title="Project Miner", page_title="Project Miner",
@@ -9,10 +10,13 @@ st.set_page_config(
st.title("Home") st.title("Home")
### Exploration ### Exploration
uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"]) uploaded_file = st.file_uploader("Upload your CSV file", type=["csv", "tsv"])
separator = st.selectbox("Separator", [",", ";", "\\t"])
separator = codecs.getdecoder("unicode_escape")(separator)[0]
has_header = st.checkbox("Has header", value=True)
if uploaded_file is not None: if uploaded_file is not None:
st.session_state.data = pd.read_csv(uploaded_file) st.session_state.data = pd.read_csv(uploaded_file, sep=separator, header=0 if has_header else 1)
st.session_state.original_data = st.session_state.data st.session_state.original_data = st.session_state.data
st.success("File loaded successfully!") st.success("File loaded successfully!")

View File

@@ -1,35 +1,22 @@
import streamlit as st import streamlit as st
import matplotlib.pyplot as plt import sys
from sklearn.cluster import DBSCAN import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
st.header("Clustering: dbscan") from dbscan_strategy import perform_dbscan_clustering
st.header("Clustering: DBSCAN")
if "data" in st.session_state: if "data" in st.session_state:
data = st.session_state.data data = st.session_state.data
with st.form("my_form"): with st.form("dbscan_form"):
data_name = st.multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3) data_name = st.multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3)
eps = st.slider("eps", min_value=0.0, max_value=1.0, value=0.5, step=0.01) eps = st.slider("eps", min_value=0.0, max_value=1.0, value=0.5, step=0.01)
min_samples = st.number_input("min_samples", step=1, min_value=1, value=5) min_samples = st.number_input("min_samples", step=1, min_value=1, value=5)
st.form_submit_button("launch") submitted = st.form_submit_button("Launch")
if len(data_name) >= 2 and len(data_name) <=3: if submitted and 2 <= len(data_name) <= 3:
x = data[data_name].to_numpy() fig = perform_dbscan_clustering(data, data_name, eps, min_samples)
dbscan = DBSCAN(eps=eps, min_samples=min_samples)
y_dbscan = dbscan.fit_predict(x)
fig = plt.figure()
if len(data_name) == 2:
ax = fig.add_subplot(projection='rectilinear')
plt.scatter(x[:, 0], x[:, 1], c=y_dbscan, s=50, cmap="viridis")
else:
ax = fig.add_subplot(projection='3d')
ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_dbscan, s=50, cmap="viridis")
st.pyplot(fig) st.pyplot(fig)
else: else:
st.error("file not loaded") st.error("File not loaded")

View File

@@ -1,44 +1,26 @@
import streamlit as st import streamlit as st
from sklearn.cluster import KMeans import sys
import matplotlib.pyplot as plt import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
st.header("Clustering: kmeans") from kmeans_strategy import perform_kmeans_clustering
st.header("Clustering: KMeans")
if "data" in st.session_state: if "data" in st.session_state:
data = st.session_state.data data = st.session_state.data
with st.form("my_form"): with st.form("kmeans_form"):
row1 = st.columns([1,1,1]) row1 = st.columns([1, 1, 1])
n_clusters = row1[0].selectbox("Number of clusters", range(1,data.shape[0])) n_clusters = row1[0].selectbox("Number of clusters", range(1, data.shape[0]))
data_name = row1[1].multiselect("Data Name",data.select_dtypes(include="number").columns, max_selections=3) data_name = row1[1].multiselect("Data Name", data.select_dtypes(include="number").columns, max_selections=3)
n_init = row1[2].number_input("n_init",step=1,min_value=1) n_init = row1[2].number_input("n_init", step=1, min_value=1)
row2 = st.columns([1,1]) row2 = st.columns([1, 1])
max_iter = row1[0].number_input("max_iter",step=1,min_value=1) max_iter = row2[0].number_input("max_iter", step=1, min_value=1)
submitted = st.form_submit_button("Launch")
if submitted and 2 <= len(data_name) <= 3:
st.form_submit_button("launch") fig = perform_kmeans_clustering(data, data_name, n_clusters, n_init, max_iter)
if len(data_name) >= 2 and len(data_name) <=3:
x = data[data_name].to_numpy()
kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111)
y_kmeans = kmeans.fit_predict(x)
fig = plt.figure()
if len(data_name) == 2:
ax = fig.add_subplot(projection='rectilinear')
plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis")
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X")
else:
ax = fig.add_subplot(projection='3d')
ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_kmeans, s=50, cmap="viridis")
centers = kmeans.cluster_centers_
ax.scatter(centers[:, 0], centers[:, 1],centers[:, 2], c="black", s=200, marker="X")
st.pyplot(fig) st.pyplot(fig)
else: else:
st.error("file not loaded") st.error("File not loaded")

View File

@@ -1,5 +1,8 @@
import streamlit as st import streamlit as st
from normstrategy import MVStrategy, ScalingStrategy import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from norm_strategy import MVStrategy, ScalingStrategy, KNNStrategy
if "data" in st.session_state: if "data" in st.session_state:
data = st.session_state.original_data data = st.session_state.original_data
@@ -8,13 +11,16 @@ if "data" in st.session_state:
for column, series in data.items(): for column, series in data.items():
col1, col2 = st.columns(2) col1, col2 = st.columns(2)
missing_count = series.isna().sum() missing_count = series.isna().sum()
choices = MVStrategy.list_available(data, series) choices = MVStrategy.list_available(data, column, series)
option = col1.selectbox( option = col1.selectbox(
f"Missing values of {column} ({missing_count})", f"Missing values of {column} ({missing_count})",
choices, choices,
index=1, index=1,
key=f"mv-{column}", key=f"mv-{column}",
) )
if isinstance(option, KNNStrategy):
option.training_features = st.multiselect("Training columns", option.training_features, default=option.available_features, key=f"cols-{column}")
option.n_neighbors = st.number_input("Number of neighbors", min_value=1, max_value=option.count_max(data, column), value=option.n_neighbors, key=f"neighbors-{column}")
# Always re-get the series to avoid reusing an invalidated series pointer # Always re-get the series to avoid reusing an invalidated series pointer
data = option.apply(data, column, data[column]) data = option.apply(data, column, data[column])

View File

@@ -1,9 +1,8 @@
import streamlit as st import streamlit as st
from sklearn.linear_model import LogisticRegression import sys
from sklearn.model_selection import train_test_split import os
from sklearn.metrics import accuracy_score sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from sklearn.preprocessing import LabelEncoder from classification_strategy import perform_classification, make_prediction
import pandas as pd
st.header("Prediction: Classification") st.header("Prediction: Classification")
@@ -12,53 +11,38 @@ if "data" in st.session_state:
with st.form("classification_form"): with st.form("classification_form"):
st.subheader("Classification Parameters") st.subheader("Classification Parameters")
data_name = st.multiselect("Features", data.columns) data_name = st.multiselect("Features", data.columns, key="classification_features")
target_name = st.selectbox("Target", data.columns) target_name = st.selectbox("Target", data.columns, key="classification_target")
test_size = st.slider("Test Size", min_value=0.1, max_value=0.5, value=0.2, step=0.1) test_size = st.slider("Test Size", min_value=0.1, max_value=0.5, value=0.2, step=0.1, key="classification_test_size")
st.form_submit_button('Train and Predict') submitted = st.form_submit_button('Train and Predict')
if data_name and target_name: if submitted and data_name and target_name:
X = data[data_name] try:
y = data[target_name] model, label_encoders, accuracy = perform_classification(data, data_name, target_name, test_size)
st.session_state.classification_model = model
label_encoders = {} st.session_state.classification_label_encoders = label_encoders
for column in X.select_dtypes(include=['object']).columns: st.session_state.classification_accuracy = accuracy
le = LabelEncoder() st.session_state.classification_features_selected = data_name
X[column] = le.fit_transform(X[column]) st.session_state.classification_target_selected = target_name
label_encoders[column] = le except ValueError as e:
st.error(e)
if y.dtype == 'object':
le = LabelEncoder()
y = le.fit_transform(y)
label_encoders[target_name] = le
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)
model = LogisticRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
if "classification_model" in st.session_state:
st.subheader("Model Accuracy") st.subheader("Model Accuracy")
st.write(f"Accuracy on test data: {accuracy:.2f}") st.write(f"Accuracy on test data: {st.session_state.classification_accuracy:.2f}")
st.subheader("Enter values for prediction") st.subheader("Enter values for prediction")
pred_values = [] input_values = []
for feature in data_name: for feature in st.session_state.classification_features_selected:
if feature in label_encoders: if feature in st.session_state.classification_label_encoders:
values = list(label_encoders[feature].classes_) values = list(st.session_state.classification_label_encoders[feature].classes_)
value = st.selectbox(f"Value for {feature}", values) value = st.selectbox(f"Value for {feature}", values, key=f"classification_input_{feature}")
value_encoded = label_encoders[feature].transform([value])[0]
pred_values.append(value_encoded)
else: else:
value = st.number_input(f"Value for {feature}", value=0.0) value = st.number_input(f"Value for {feature}", value=0.0, key=f"classification_input_{feature}")
pred_values.append(value) input_values.append(value)
prediction = model.predict(pd.DataFrame([pred_values], columns=data_name)) prediction = make_prediction(st.session_state.classification_model, st.session_state.classification_label_encoders, st.session_state.classification_features_selected, st.session_state.classification_target_selected, input_values)
if target_name in label_encoders: st.write("Prediction:", prediction)
prediction = label_encoders[target_name].inverse_transform(prediction)
st.write("Prediction:", prediction[0])
else: else:
st.error("File not loaded") st.error("File not loaded")

View File

@@ -1,6 +1,8 @@
import streamlit as st import streamlit as st
from sklearn.linear_model import LinearRegression import sys
import pandas as pd import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from regression_strategy import perform_regression, make_prediction
st.header("Prediction: Regression") st.header("Prediction: Regression")
@@ -9,21 +11,24 @@ if "data" in st.session_state:
with st.form("regression_form"): with st.form("regression_form"):
st.subheader("Linear Regression Parameters") st.subheader("Linear Regression Parameters")
data_name = st.multiselect("Features", data.select_dtypes(include="number").columns) data_name = st.multiselect("Features", data.select_dtypes(include="number").columns, key="regression_features")
target_name = st.selectbox("Target", data.select_dtypes(include="number").columns) target_name = st.selectbox("Target", data.select_dtypes(include="number").columns, key="regression_target")
st.form_submit_button('Train and Predict') submitted = st.form_submit_button('Train and Predict')
if data_name and target_name: if submitted and data_name and target_name:
X = data[data_name] try:
y = data[target_name] model = perform_regression(data, data_name, target_name)
st.session_state.regression_model = model
model = LinearRegression() st.session_state.regression_features_selected = data_name
model.fit(X, y) st.session_state.regression_target_selected = target_name
except ValueError as e:
st.error(e)
if "regression_model" in st.session_state:
st.subheader("Enter values for prediction") st.subheader("Enter values for prediction")
pred_values = [st.number_input(f"Value for {feature}", value=0.0) for feature in data_name] input_values = [st.number_input(f"Value for {feature}", value=0.0, key=f"regression_input_{feature}") for feature in st.session_state.regression_features_selected]
prediction = model.predict(pd.DataFrame([pred_values], columns=data_name)) prediction = make_prediction(st.session_state.regression_model, st.session_state.regression_features_selected, input_values)
st.write("Prediction:", prediction[0]) st.write("Prediction:", prediction)
else: else:
st.error("File not loaded") st.error("File not loaded")

View File

@@ -1,30 +1,25 @@
import streamlit as st import streamlit as st
import matplotlib.pyplot as plt import sys
import seaborn as sns import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../backend')))
from visualization_strategy import plot_histogram, plot_boxplot
st.header("Data Visualization") st.header("Data Visualization")
if "data" in st.session_state: if "data" in st.session_state:
data = st.session_state.data data = st.session_state.data
st.subheader("Histogram") st.subheader("Histogram")
column_to_plot = st.selectbox("Select Column for Histogram", data.columns) column_to_plot = st.selectbox("Select Column for Histogram", data.columns)
if column_to_plot: if column_to_plot:
fig, ax = plt.subplots() fig = plot_histogram(data, column_to_plot)
ax.hist(data[column_to_plot].dropna(), bins=20, edgecolor='k')
ax.set_title(f"Histogram of {column_to_plot}")
ax.set_xlabel(column_to_plot)
ax.set_ylabel("Frequency")
st.pyplot(fig) st.pyplot(fig)
st.subheader("Boxplot") st.subheader("Boxplot")
dataNumeric = data.select_dtypes(include="number") dataNumeric = data.select_dtypes(include="number")
column_to_plot = st.selectbox("Select Column for Boxplot", dataNumeric.columns) column_to_plot = st.selectbox("Select Column for Boxplot", dataNumeric.columns)
if column_to_plot: if column_to_plot:
fig, ax = plt.subplots() fig = plot_boxplot(data, column_to_plot)
sns.boxplot(data=data, x=column_to_plot, ax=ax)
ax.set_title(f"Boxplot of {column_to_plot}")
st.pyplot(fig) st.pyplot(fig)
else: else:
st.error("file not loaded") st.error("file not loaded")

6
requirements.txt Normal file
View File

@@ -0,0 +1,6 @@
matplotlib>=3.5.0
pandas>=1.5.0
seaborn>=0.12.0
scikit-learn>=0.23.0
streamlit>=1.35.0
ruff>=0.4.8