Compare commits
6 Commits
stat ... stat_predi
| Author | SHA1 | Date |
|---|---|---|
| | 9bc9e21e45 | |
| | da1e97f07f | |
| | 27e69b2af8 | |
| | 4054395641 | |
| | 01168f3588 | |
| | 9da6e2d594 | |
frontend/clusters.py (Normal file, 63 lines added)
@@ -0,0 +1,63 @@
```python
from sklearn.cluster import DBSCAN, KMeans
import numpy as np


class DBSCAN_cluster():

    def __init__(self, eps, min_samples,data):
        self.eps = eps
        self.min_samples = min_samples
        self.data = data
        self.labels = np.array([])

    def run(self):
        dbscan = DBSCAN(eps=self.eps, min_samples=self.min_samples)
        self.labels = dbscan.fit_predict(self.data)
        return self.labels

    def get_stats(self):
        unique_labels = np.unique(self.labels)
        stats = []
        for label in unique_labels:
            if label == -1:
                continue
            cluster_points = self.data[self.labels == label]
            num_points = len(cluster_points)
            density = num_points / (np.max(cluster_points, axis=0) - np.min(cluster_points, axis=0)).prod()
            stats.append({
                "cluster": label,
                "num_points": num_points,
                "density": density
            })

        return stats


class KMeans_cluster():

    def __init__(self, n_clusters, n_init, max_iter, data):
        self.n_clusters = n_clusters
        self.n_init = n_init
        self.max_iter = max_iter
        self.data = data
        self.labels = np.array([])
        self.centers = []

    def run(self):
        kmeans = KMeans(n_clusters=self.n_clusters, init="random", n_init=self.n_init, max_iter=self.max_iter, random_state=111)
        self.labels = kmeans.fit_predict(self.data)
        self.centers = kmeans.cluster_centers_
        return self.labels

    def get_stats(self):
        unique_labels = np.unique(self.labels)
        stats = []

        for label in unique_labels:
            cluster_points = self.data[self.labels == label]
            num_points = len(cluster_points)
            center = self.centers[label]
            stats.append({
                'cluster': label,
                'num_points': num_points,
                'center': center
            })
        return stats
```
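For context, here is a minimal usage sketch of the two wrappers added in `clusters.py`. The toy data and parameter values below are illustrative assumptions, not part of the commit. Note that `DBSCAN_cluster.get_stats()` skips the noise label `-1` and reports density as the point count divided by the cluster's bounding-box volume.

```python
import numpy as np
from clusters import DBSCAN_cluster, KMeans_cluster

# Illustrative 2-D data: two loose blobs (values chosen arbitrarily for this sketch).
rng = np.random.default_rng(0)
data = np.vstack([
    rng.normal(loc=0.0, scale=0.3, size=(50, 2)),
    rng.normal(loc=3.0, scale=0.3, size=(50, 2)),
])

# DBSCAN wrapper: run() fits and returns labels, get_stats() summarises non-noise clusters.
db = DBSCAN_cluster(eps=0.5, min_samples=5, data=data)
db_labels = db.run()
print(db.get_stats())   # [{'cluster': 0, 'num_points': ..., 'density': ...}, ...]

# KMeans wrapper: run() also stores the fitted centroids on the instance.
km = KMeans_cluster(n_clusters=2, n_init=10, max_iter=300, data=data)
km_labels = km.run()
print(km.centers)       # array of shape (2, 2)
print(km.get_stats())   # [{'cluster': 0, 'num_points': ..., 'center': array([...])}, ...]
```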
```diff
@@ -1,10 +1,9 @@
 import streamlit as st
 import matplotlib.pyplot as plt
-from sklearn.cluster import DBSCAN
+from clusters import DBSCAN_cluster

 st.header("Clustering: dbscan")


 if "data" in st.session_state:
     data = st.session_state.data
@@ -17,8 +16,9 @@ if "data" in st.session_state:
     if len(data_name) >= 2 and len(data_name) <=3:
         x = data[data_name].to_numpy()

-        dbscan = DBSCAN(eps=eps, min_samples=min_samples)
-        y_dbscan = dbscan.fit_predict(x)
+        dbscan = DBSCAN_cluster(eps,min_samples,x)
+        y_dbscan = dbscan.run()
+        st.table(dbscan.get_stats())

         fig = plt.figure()
         if len(data_name) == 2:
@@ -28,8 +28,5 @@ if "data" in st.session_state:
             ax = fig.add_subplot(projection='3d')
             ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_dbscan, s=50, cmap="viridis")
         st.pyplot(fig)
-
-
-
 else:
     st.error("file not loaded")
```
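For reference, the density value that now appears in the dbscan stats table is the cluster's point count divided by the volume of its axis-aligned bounding box (the product of per-feature ranges). A tiny worked sketch with made-up points, purely illustrative:

```python
import numpy as np

# Four made-up 2-D points forming one cluster (illustrative values only).
cluster_points = np.array([[0.0, 0.0],
                           [2.0, 0.0],
                           [0.0, 1.0],
                           [2.0, 1.0]])

num_points = len(cluster_points)                                          # 4
ranges = np.max(cluster_points, axis=0) - np.min(cluster_points, axis=0)  # [2.0, 1.0]
density = num_points / ranges.prod()                                      # 4 / (2.0 * 1.0) = 2.0
print(density)
```

One caveat: if a cluster has zero spread along any feature, the bounding-box volume is zero and this ratio divides by zero, which the committed code does not guard against.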
```diff
@@ -1,10 +1,9 @@
 import streamlit as st
-from sklearn.cluster import KMeans
 import matplotlib.pyplot as plt
+from clusters import KMeans_cluster

 st.header("Clustering: kmeans")


 if "data" in st.session_state:
     data = st.session_state.data
@@ -23,21 +22,22 @@ if "data" in st.session_state:
     if len(data_name) >= 2 and len(data_name) <=3:
         x = data[data_name].to_numpy()

-        kmeans = KMeans(n_clusters=n_clusters, init="random", n_init=n_init, max_iter=max_iter, random_state=111)
-        y_kmeans = kmeans.fit_predict(x)
+        kmeans = KMeans_cluster(n_clusters, n_init, max_iter, x)
+        y_kmeans = kmeans.run()

+        st.table(kmeans.get_stats())

+        centers = kmeans.centers
         fig = plt.figure()
         if len(data_name) == 2:
             ax = fig.add_subplot(projection='rectilinear')
             plt.scatter(x[:, 0], x[:, 1], c=y_kmeans, s=50, cmap="viridis")
-            centers = kmeans.cluster_centers_
             plt.scatter(centers[:, 0], centers[:, 1], c="black", s=200, marker="X")
         else:
             ax = fig.add_subplot(projection='3d')

             ax.scatter(x[:, 0], x[:, 1],x[:, 2], c=y_kmeans, s=50, cmap="viridis")
-            centers = kmeans.cluster_centers_
-            ax.scatter(centers[:, 0], centers[:, 1],centers[:, 2], c="black", s=200, marker="X")
+            ax.scatter(centers[:, 0], centers[:, 1], centers[:, 2], c="black", s=200, marker="X")
         st.pyplot(fig)

 else:
```
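Worth noting: after this change the kmeans page reads the centroids from the wrapper's `centers` attribute, which `run()` fills from sklearn's `cluster_centers_`, so both spellings refer to the same fitted array. A minimal sketch of that equivalence, with placeholder data assumed for illustration:

```python
import numpy as np
from sklearn.cluster import KMeans
from clusters import KMeans_cluster

x = np.random.rand(100, 2)  # placeholder feature matrix, assumed for this sketch

# Direct sklearn call (the old style on this page).
kmeans_direct = KMeans(n_clusters=3, init="random", n_init=10, max_iter=300, random_state=111)
labels_direct = kmeans_direct.fit_predict(x)
centers_direct = kmeans_direct.cluster_centers_

# Wrapper from clusters.py (the new style): centers are stored on the instance by run().
kmeans_wrapped = KMeans_cluster(3, 10, 300, x)
labels_wrapped = kmeans_wrapped.run()
centers_wrapped = kmeans_wrapped.centers

# Same data, same parameters, same fixed random_state, so the centroids match.
assert np.allclose(centers_direct, centers_wrapped)
```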
```diff
@@ -1,9 +1,11 @@
 import streamlit as st
 from sklearn.linear_model import LogisticRegression
 from sklearn.model_selection import train_test_split
-from sklearn.metrics import accuracy_score
+from sklearn.metrics import accuracy_score,confusion_matrix
 from sklearn.preprocessing import LabelEncoder
 import pandas as pd
+import matplotlib.pyplot as plt
+import seaborn as sns

 st.header("Prediction: Classification")

@@ -60,5 +62,18 @@ if "data" in st.session_state:
         prediction = label_encoders[target_name].inverse_transform(prediction)

         st.write("Prediction:", prediction[0])

+        if len(data_name) == 1:
+            fig = plt.figure()
+
+            y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()]
+            cm = confusion_matrix(y, y_pred)
+
+            sns.heatmap(cm, annot=True, fmt="d")
+
+            plt.xlabel('Predicted')
+            plt.ylabel('True')
+
+            st.pyplot(fig, figsize=(1, 1))
 else:
     st.error("File not loaded")
```
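In the confusion-matrix block added above, the list comprehension calls `model.predict` once per sample. A hedged, self-contained sketch of the equivalent (and typically faster) vectorised version, using a synthetic stand-in for the page's `X`, `y`, and fitted model, all of which are made up here for illustration:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix

# Small synthetic stand-in for the page's feature frame X and target y.
rng = np.random.default_rng(0)
X = pd.DataFrame({"feature": np.r_[rng.normal(0, 1, 50), rng.normal(3, 1, 50)]})
y = pd.Series([0] * 50 + [1] * 50, name="target")

model = LogisticRegression().fit(X, y)

# One vectorised predict call over the whole frame instead of one call per row.
y_pred = model.predict(X)
cm = confusion_matrix(y, y_pred)

fig = plt.figure()
sns.heatmap(cm, annot=True, fmt="d")
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()  # on the Streamlit page this would be st.pyplot(fig) instead
```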
```diff
@@ -1,6 +1,8 @@
 import streamlit as st
 from sklearn.linear_model import LinearRegression
+from sklearn.metrics import r2_score
 import pandas as pd
+import matplotlib.pyplot as plt

 st.header("Prediction: Regression")

@@ -25,5 +27,37 @@ if "data" in st.session_state:
         prediction = model.predict(pd.DataFrame([pred_values], columns=data_name))

         st.write("Prediction:", prediction[0])

+        fig = plt.figure()
+        dataframe_sorted = pd.concat([X, y], axis=1).sort_values(by=data_name)
+
+        if len(data_name) == 1:
+            y_pred = [model.predict(pd.DataFrame([pred_value[0]], columns=data_name)) for pred_value in X.values.tolist()]
+            r2 = r2_score(y, y_pred)
+            st.write('R-squared score:', r2)
+
+            X = dataframe_sorted[data_name[0]]
+            y = dataframe_sorted[target_name]
+
+            prediction_array_y = [
+                model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i]]], columns=data_name))[0]
+                for i in range(dataframe_sorted.shape[0])
+            ]
+
+            plt.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[target_name], color='b')
+            plt.plot(dataframe_sorted[data_name[0]], prediction_array_y, color='r')
+        elif len(data_name) == 2:
+            ax = fig.add_subplot(111, projection='3d')
+
+            prediction_array_y = [
+                model.predict(pd.DataFrame([[dataframe_sorted[data_name[0]].iloc[i], dataframe_sorted[data_name[1]].iloc[i]]], columns=data_name))[0]
+                for i in range(dataframe_sorted.shape[0])
+            ]
+
+            ax.scatter(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], dataframe_sorted[target_name], color='b')
+            ax.plot(dataframe_sorted[data_name[0]], dataframe_sorted[data_name[1]], prediction_array_y, color='r')
+
+        st.pyplot(fig)

 else:
     st.error("File not loaded")
```
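Similarly for the regression page: the fitted-line points and the R-squared score are built with one `model.predict` call per row. A hedged sketch of the vectorised equivalent on a synthetic single-feature example; the column names, data, and coefficients below are made up for illustration and are not part of the commit:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

# Synthetic stand-in for the page's selected feature column and target column.
data_name = ["x1"]      # hypothetical feature column name
target_name = "y"       # hypothetical target column name
df = pd.DataFrame({"x1": np.linspace(0, 10, 50)})
df["y"] = 2.0 * df["x1"] + 1.0 + np.random.default_rng(0).normal(0, 0.5, 50)

X, y = df[data_name], df[target_name]
model = LinearRegression().fit(X, y)

# One predict call over the sorted feature values instead of a per-row loop.
df_sorted = df.sort_values(by=data_name)
y_line = model.predict(df_sorted[data_name])
print("R-squared score:", r2_score(y, model.predict(X)))

plt.scatter(df["x1"], df["y"], color='b')
plt.plot(df_sorted["x1"], y_line, color='r')
plt.show()  # on the Streamlit page this would be st.pyplot(fig) instead
```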