Project: Interpreting Unsupervised Learning Models

Arctic Penguin Exploration: Unraveling Clusters in the Icy Domain with K-means Clustering

Artwork by @allison_horst: https://github.com/allisonhorst/penguins

You have been asked to support a team of researchers who have been collecting data about penguins in Antarctica!

Origin of this data: Data were collected and made available by Dr. Kristen Gorman and the Palmer Station, Antarctica LTER, a member of the Long Term Ecological Research Network.

The dataset consists of five columns:

  • culmen_length_mm: culmen length (mm)
  • culmen_depth_mm: culmen depth (mm)
  • flipper_length_mm: flipper length (mm)
  • body_mass_g: body mass (g)
  • sex: penguin sex

Unfortunately, they have not been able to record the species of penguin, but they know that three species are native to the region: Adelie, Chinstrap, and Gentoo. Your task is to apply your data science skills to help them identify groups in the dataset!

# Import Required Packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

# Step 1 - Loading and examining the dataset
penguins_df = pd.read_csv("data/penguins.csv")
penguins_df.head()
penguins_df.info()

# Step 2 - Dealing with null values and outliers
penguins_df.boxplot()
plt.show()
penguins_df = penguins_df.dropna()
# Inspect the extreme flipper-length values flagged by the boxplot
penguins_df[penguins_df['flipper_length_mm'] > 4000]
penguins_df[penguins_df['flipper_length_mm'] < 0]
# Drop the two outlier rows identified above by index
penguins_clean = penguins_df.drop([9, 14])

# Step 3 - Perform preprocessing steps on the dataset to create dummy variables
# The raw data contains an invalid sex value '.', which produces a 'sex_.' dummy column; drop it
df = pd.get_dummies(penguins_clean).drop('sex_.', axis=1)

# Step 4 - Perform preprocessing steps on the dataset - scaling
scaler = StandardScaler()
X = scaler.fit_transform(df)
penguins_preprocessed = pd.DataFrame(data=X, columns=df.columns)
penguins_preprocessed.head(10)
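
# (Optional sanity check, an addition beyond the original steps): after
# StandardScaler, every column should have mean ~0 and standard deviation ~1.
penguins_preprocessed.describe().loc[['mean', 'std']].round(2)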

# Step 5 - Perform PCA
pca = PCA(n_components=None)
dfx_pca = pca.fit(penguins_preprocessed)
dfx_pca.explained_variance_ratio_
# Keep only the components that each explain more than 10% of the variance
n_components = sum(dfx_pca.explained_variance_ratio_ > 0.1)
pca = PCA(n_components=n_components)
penguins_PCA = pca.fit_transform(penguins_preprocessed)
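
# (Optional sanity check, an addition beyond the original steps): the cumulative
# explained variance shows how much structure the retained components capture.
plt.plot(range(1, len(dfx_pca.explained_variance_ratio_) + 1),
         dfx_pca.explained_variance_ratio_.cumsum(), marker='o')
plt.xlabel('Number of components')
plt.ylabel('Cumulative explained variance')
plt.show()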

# Step 6 - Detect the optimal number of clusters for k-means clustering
inertia = []
for k in range(1, 10):
    kmeans = KMeans(n_clusters=k, random_state=42).fit(penguins_PCA)
    inertia.append(kmeans.inertia_)
plt.plot(range(1, 10), inertia, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Inertia')
plt.title('Elbow Method')
plt.show()
# The elbow in the inertia curve suggests 4 clusters
n_clusters = 4
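
# (Optional cross-check, an addition beyond the original steps): the silhouette
# score gives a second opinion on the cluster count; higher is better.
from sklearn.metrics import silhouette_score
for k in range(2, 7):
    labels = KMeans(n_clusters=k, random_state=42).fit_predict(penguins_PCA)
    print(k, round(silhouette_score(penguins_PCA, labels), 3))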

# Step 7 - Run the k-means clustering algorithm
# with the optimal number of clusters 
# and visualize the resulting clusters.
kmeans = KMeans(n_clusters=n_clusters, random_state=42).fit(penguins_PCA)
plt.scatter(penguins_PCA[:, 0], penguins_PCA[:, 1], c=kmeans.labels_, cmap='viridis')
plt.xlabel('First Principal Component')
plt.ylabel('Second Principal Component')
plt.title(f'K-means Clustering (K={n_clusters})')
plt.show()

# Step 8 - Create a final statistical DataFrame for each cluster.
penguins_clean['label'] = kmeans.labels_
numeric_columns = ['culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm','label']
stat_penguins = penguins_clean[numeric_columns].groupby('label').mean()
stat_penguins
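
# (Optional, an addition beyond the original steps): cluster sizes help judge
# whether the grouping is plausible.
penguins_clean['label'].value_counts()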

# --- Alternative solution: the steps below repeat the analysis with
# seaborn-based EDA and an IQR rule for outlier removal ---

# Examine the raw data again
penguins_df.head()
penguins_df.info()
penguins_df.isna().sum()
sns.pairplot(penguins_df)
plt.show()
penguins_no_na = penguins_df.dropna()
penguins_no_na.isna().sum()
penguins_no_na.head()
sns.pairplot(penguins_no_na)
plt.show()
sns.boxplot(data=penguins_no_na, y='flipper_length_mm')
plt.show()
# Write function to visualize features with outliers
def plotting_out(df):
    for col in df.columns:
        if df[col].dtype in ['int64', 'float64']:  # Check if column is numeric
            sns.boxplot(data=df, y=col)
            plt.show()
plotting_out(penguins_no_na)
# Compute IQR-based outlier thresholds (the actual filtering happens below)
def remove_out(df_col):
    # 75th percentile
    seventy_fifth = df_col.quantile(0.75)
    # 25th percentile
    twenty_fifth = df_col.quantile(0.25)
    # Interquartile range
    df_col_iqr = seventy_fifth - twenty_fifth
    # Upper threshold
    upper = seventy_fifth + (1.5 * df_col_iqr)
    # Lower threshold
    lower = twenty_fifth - (1.5 * df_col_iqr)
    return upper, lower
upper, lower = remove_out(penguins_no_na['flipper_length_mm'])

print(upper, lower)
# Use .copy() so later column assignments don't trigger SettingWithCopyWarning
penguins_clean = penguins_no_na[(penguins_no_na['flipper_length_mm'] > lower) & (penguins_no_na['flipper_length_mm'] < upper)].copy()
penguins_clean.head()
# Create dummy variables
dummies = pd.get_dummies(penguins_clean, drop_first=True)

# Scale data
scaler = StandardScaler()
dummies_scaled = scaler.fit_transform(dummies)

# Combine scaled data into a new DataFrame and check
penguins_preprocessed = pd.DataFrame(data=dummies_scaled, columns=dummies.columns)
penguins_preprocessed.head()
# Perform PCA
model = PCA()
model.fit(penguins_preprocessed)
# Get explained variance and keep components that each explain more than 10%
# (the ratios are sorted in descending order, so this counts them)
n_components = np.where(model.explained_variance_ratio_ > 0.1)[0][-1] + 1
# apply PCA to data
pca = PCA(n_components=n_components)
penguins_PCA = pca.fit_transform(penguins_preprocessed)
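
# (Optional check, an addition beyond the original steps): report the variance
# retained by the selected components.
print(pca.explained_variance_ratio_.cumsum())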
# Implementation of KMeans: elbow analysis to determine the optimal number of clusters
wcss = []
for cluster in range(1, 21):
    kmeans = KMeans(n_clusters=cluster, random_state=42)
    kmeans.fit(penguins_PCA)
    wcss.append(kmeans.inertia_)
    
# Plot elbow 
plt.plot(range(1, 21), wcss)
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.axvline(x=4, color='black', linestyle='--')  # mark the elbow at k=4

plt.show()
# Choose number of clusters based on graph
n_cluster = 4

# Create new kmeans model and fit to data
kmeans = KMeans(n_clusters=n_cluster, random_state=42).fit(penguins_PCA)

# Visualise the clusters
plt.scatter(penguins_PCA[:, 0], penguins_PCA[:, 1], c=kmeans.labels_)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.title(f'K-means Clustering (K={n_cluster})')
plt.show()
# Create dataframe with each cluster label
penguins_clean['label'] = kmeans.labels_
numeric_col = [col for col in penguins_clean.columns if penguins_clean[col].dtype in ['int64', 'float64', 'int32']]
stat_penguins = penguins_clean[numeric_col].groupby('label').mean()
print(stat_penguins)