{ "cells": [ { "cell_type": "markdown", "id": "cf894f6f", "metadata": {}, "source": [ "# Intermediate Fusion mit Deep SVDD" ] }, { "cell_type": "markdown", "id": "494626b1", "metadata": {}, "source": [ "* Input: gemeinsames Dataset aus EYE Tracking und Action Units mit selber Abtastfrequenz\n", "* Verarbeitung: Intermediate Fusion\n", "* Modell: Deep SVDD --> Erlernen einer Kugel durch ein neuronales Netz, dass die Normaldaten einschließt" ] }, { "cell_type": "markdown", "id": "bef91203", "metadata": {}, "source": [ "### Imports + GPU " ] }, { "cell_type": "code", "execution_count": null, "id": "f0b8274a", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import numpy as np\n", "from pathlib import Path\n", "import sys\n", "import os\n", "import time\n", "base_dir = os.path.abspath(os.path.join(os.getcwd(), \"..\"))\n", "sys.path.append(base_dir)\n", "\n", "from Fahrsimulator_MSY2526_AI.model_training.tools import evaluation_tools, scaler, mad_outlier_removal, performance_split\n", "from sklearn.preprocessing import StandardScaler, MinMaxScaler\n", "from sklearn.svm import OneClassSVM\n", "from sklearn.model_selection import GridSearchCV, KFold, ParameterGrid, train_test_split, GroupKFold\n", "import matplotlib.pyplot as plt\n", "import tensorflow as tf\n", "from tensorflow.keras import layers, models, regularizers\n", "import pickle\n", "from sklearn.metrics import (accuracy_score, auc, roc_curve, f1_score) " ] }, { "cell_type": "code", "execution_count": null, "id": "f03c8da9", "metadata": {}, "outputs": [], "source": [ "# Check GPU availability\n", "print(\"TensorFlow version:\", tf.__version__)\n", "print(\"GPU Available:\", tf.config.list_physical_devices('GPU'))\n", "print(\"CUDA Available:\", tf.test.is_built_with_cuda())\n", "\n", "# Get detailed GPU info\n", "gpus = tf.config.list_physical_devices('GPU')\n", "if gpus:\n", " print(f\"\\nNumber of GPUs: {len(gpus)}\")\n", " for gpu in gpus:\n", " print(f\"GPU: {gpu}\")\n", " \n", " # 
Enable memory growth to prevent TF from allocating all GPU memory\n", " try:\n", " for gpu in gpus:\n", " tf.config.experimental.set_memory_growth(gpu, True)\n", " print(\"\\nGPU memory growth enabled\")\n", " except RuntimeError as e:\n", " print(e)\n", "else:\n", " print(\"\\nNo GPU found - running on CPU\")" ] }, { "cell_type": "markdown", "id": "f00a477c", "metadata": {}, "source": [ "### Configuration of paths and data preprocessing" ] }, { "cell_type": "code", "execution_count": null, "id": "5136fcec", "metadata": {}, "outputs": [], "source": [ "# TODO: set path where to save normalizer\n", "normalizer_path=Path('.pkl') # TODO: set manually" ] }, { "cell_type": "code", "execution_count": null, "id": "c2115f65", "metadata": {}, "outputs": [], "source": [ "performance_path = Path(r\".csv\") # TODO: set manually\n", "performance_df = pd.read_csv(performance_path)" ] }, { "cell_type": "code", "execution_count": null, "id": "559eb8d2", "metadata": {}, "outputs": [], "source": [ "encoder_save_path = Path('.keras') # TODO: set manually\n", "deep_svdd_save_path = Path('.keras') # TODO: set manually" ] }, { "cell_type": "code", "execution_count": null, "id": "6482542b", "metadata": {}, "outputs": [], "source": [ "dataset_path = Path(r\".parquet\") # TODO: set manually" ] }, { "cell_type": "code", "execution_count": null, "id": "ce8ab464", "metadata": {}, "outputs": [], "source": [ "df = pd.read_parquet(path=dataset_path)" ] }, { "cell_type": "markdown", "id": "c045c46d", "metadata": {}, "source": [ "Performance based split" ] }, { "cell_type": "code", "execution_count": null, "id": "1660ec95", "metadata": {}, "outputs": [], "source": [ "train_ids, temp_ids, diff1 = performance_split.performance_based_split(\n", " subject_ids=df[\"subjectID\"].unique(),\n", " performance_df=performance_df,\n", " split_ratio=0.6, # 60% train, 40% temp\n", " random_seed=42\n", ")\n", "\n", "val_ids, test_ids, diff2 = performance_split.performance_based_split(\n", " 
subject_ids=temp_ids,\n", " performance_df=performance_df,\n", " split_ratio=0.5, # 50/50 split of remaining 40%\n", " random_seed=43\n", ")\n", "print(diff1, diff2)" ] }, { "cell_type": "markdown", "id": "195b7283", "metadata": {}, "source": [ "Labeling" ] }, { "cell_type": "code", "execution_count": null, "id": "05b6b73d", "metadata": {}, "outputs": [], "source": [ "low_all = df[\n", " ((df[\"PHASE\"] == \"baseline\") |\n", " ((df[\"STUDY\"] == \"n-back\") & (df[\"PHASE\"] != \"baseline\") & (df[\"LEVEL\"].isin([1, 4]))))\n", "]\n", "print(f\"low all: {low_all.shape}\")\n", "\n", "high_nback = df[\n", " (df[\"STUDY\"]==\"n-back\") &\n", " (df[\"LEVEL\"].isin([2, 3, 5, 6])) &\n", " (df[\"PHASE\"].isin([\"train\", \"test\"]))\n", "]\n", "print(f\"high n-back: {high_nback.shape}\")\n", "\n", "high_kdrive = df[\n", " (df[\"STUDY\"] == \"k-drive\") & (df[\"PHASE\"] != \"baseline\")\n", "]\n", "print(f\"high k-drive: {high_kdrive.shape}\")\n", "\n", "high_all = pd.concat([high_nback, high_kdrive])\n", "print(f\"high all: {high_all.shape}\")" ] }, { "cell_type": "code", "execution_count": null, "id": "60148c0b", "metadata": {}, "outputs": [], "source": [ "low = low_all.copy()\n", "high = high_all.copy()\n", "\n", "low[\"label\"] = 0\n", "high[\"label\"] = 1\n", "\n", "data = pd.concat([low, high], ignore_index=True)\n", "df = data.drop_duplicates()\n", "df = df.dropna()\n", "print(\"Label distribution:\")\n", "print(df[\"label\"].value_counts())" ] }, { "cell_type": "markdown", "id": "c8fefca7", "metadata": {}, "source": [ "Split" ] }, { "cell_type": "code", "execution_count": null, "id": "da6a2f87", "metadata": {}, "outputs": [], "source": [ "train_df = df[\n", " (df.subjectID.isin(train_ids)) & (df['label'] == 0)\n", "].copy()\n", "\n", "# Validation: balanced sampling of label=0 and label=1\n", "val_df_full = df[df.subjectID.isin(val_ids)].copy()\n", "\n", "# Get all label=0 samples\n", "val_df_label0 = val_df_full[val_df_full['label'] == 0]\n", "\n", "# Sample same 
number from label=1\n", "n_samples = len(val_df_label0)\n", "val_df_label1 = val_df_full[val_df_full['label'] == 1].sample(\n", " n=n_samples, random_state=42\n", ")\n", "\n", "# Combine\n", "val_df = pd.concat([val_df_label0, val_df_label1], ignore_index=True)\n", "test_df = df[df.subjectID.isin(test_ids)]\n", "print(train_df.shape, val_df.shape,test_df.shape)" ] }, { "cell_type": "code", "execution_count": null, "id": "e8375760", "metadata": {}, "outputs": [], "source": [ "val_df['label'].value_counts()" ] }, { "cell_type": "markdown", "id": "f0570a3c", "metadata": {}, "source": [ "Normalization" ] }, { "cell_type": "code", "execution_count": null, "id": "cdd2ba73", "metadata": {}, "outputs": [], "source": [ "face_au_cols = [c for c in train_df.columns if c.startswith(\"FACE_AU\")]\n", "eye_cols = ['Fix_count_short_66_150', 'Fix_count_medium_300_500',\n", " 'Fix_count_long_gt_1000', 'Fix_count_100', 'Fix_mean_duration',\n", " 'Fix_median_duration', 'Sac_count', 'Sac_mean_amp', 'Sac_mean_dur',\n", " 'Sac_median_dur', 'Blink_count', 'Blink_mean_dur', 'Blink_median_dur',\n", " 'Pupil_mean', 'Pupil_IPA']\n", "print(len(eye_cols))\n", "all_signal_columns = face_au_cols+eye_cols\n", "print(len(all_signal_columns))\n", "\n", "# fit and save normalizer\n", "normalizer = scaler.fit_normalizer(train_df, all_signal_columns, method='minmax', scope='global')\n", "scaler.save_normalizer(normalizer, normalizer_path )" ] }, { "cell_type": "code", "execution_count": null, "id": "76afc4d3", "metadata": {}, "outputs": [], "source": [ "normalizer = scaler.load_normalizer(normalizer_path)\n", "# Apply normalization to all sets\n", "train_df_norm = scaler.apply_normalizer(train_df, all_signal_columns, normalizer)\n", "val_df_norm = scaler.apply_normalizer(val_df, all_signal_columns, normalizer)\n", "test_df_norm = scaler.apply_normalizer(test_df, all_signal_columns, normalizer)" ] }, { "cell_type": "markdown", "id": "77deead9", "metadata": {}, "source": [ "Outlier removal (later)" ] 
}, { "cell_type": "markdown", "id": "fd139799", "metadata": {}, "source": [ "Change of dtypes for keras pandas" ] }, { "cell_type": "code", "execution_count": null, "id": "8587343e", "metadata": {}, "outputs": [], "source": [ "X_face = train_df_norm[face_au_cols].to_numpy(dtype=np.float32)\n", "X_eye = train_df_norm[eye_cols].to_numpy(dtype=np.float32)" ] }, { "cell_type": "markdown", "id": "b736bc58", "metadata": {}, "source": [ "### Autoencoder Pre-Training" ] }, { "cell_type": "markdown", "id": "aa11faf3", "metadata": {}, "source": [ "Vor-Training der Gewichte mit Autoencoder, Loss: MSE" ] }, { "cell_type": "code", "execution_count": null, "id": "3eab9d94", "metadata": {}, "outputs": [], "source": [ "def build_intermediate_fusion_autoencoder(\n", " input_dim_mod1=15,\n", " input_dim_mod2=20,\n", " encoder_hidden_dim_mod1=12, # TODO: set manually\n", " encoder_hidden_dim_mod2=20, # TODO: set manually\n", " latent_dim=6, # TODO: set manually\n", " dropout_rate=0.4, # TODO: set manually\n", " neg_slope=0.1, # TODO: set manually\n", " weight_decay=1e-4, # TODO: set manually\n", " decoder_hidden_dims=[16, 32] # TODO: set manually\n", "):\n", " \"\"\"\n", " Verbesserter Intermediate-Fusion Autoencoder für Deep SVDD.\n", " Änderungen:\n", " - Bottleneck vergrößert (latent_dim)\n", " - Dropout nur in Hidden Layers, nicht im Bottleneck\n", " - Decoder größer für stabileres Pretraining\n", " - Parametrisierbare Hidden-Dimensions für Encoder\n", " \"\"\"\n", "\n", " l2 = regularizers.l2(weight_decay)\n", " act = layers.LeakyReLU(negative_slope=neg_slope)\n", "\n", " # -------- Inputs --------\n", " x1_in = layers.Input(shape=(input_dim_mod1,), name=\"modality_1\")\n", " x2_in = layers.Input(shape=(input_dim_mod2,), name=\"modality_2\")\n", "\n", " # -------- Encoder 1 --------\n", " e1 = layers.Dense(\n", " encoder_hidden_dim_mod1,\n", " use_bias=False,\n", " kernel_regularizer=l2\n", " )(x1_in)\n", " e1 = act(e1)\n", " e1 = layers.Dropout(dropout_rate)(e1) \n", "\n", " e1 
= layers.Dense(\n", " 16, \n", " use_bias=False,\n", " kernel_regularizer=l2\n", " )(e1)\n", " e1 = act(e1)\n", "\n", " # -------- Encoder 2 --------\n", " e2 = layers.Dense(\n", " encoder_hidden_dim_mod2,\n", " use_bias=False,\n", " kernel_regularizer=l2\n", " )(x2_in)\n", " e2 = act(e2)\n", " e2 = layers.Dropout(dropout_rate)(e2) \n", "\n", " e2 = layers.Dense(\n", " 16, \n", " use_bias=False,\n", " kernel_regularizer=l2\n", " )(e2)\n", " e2 = act(e2)\n", "\n", " # -------- Intermediate Fusion --------\n", " fused = layers.Concatenate(name=\"fusion\")([e1, e2]) # 16+16=32 dimensions\n", "\n", " # -------- Joint Encoder / Bottleneck --------\n", "\n", " h = layers.Dense(\n", " latent_dim,\n", " use_bias=False,\n", " kernel_regularizer=l2\n", " )(fused)\n", " h = act(h)\n", " h = layers.Dropout(dropout_rate)(h)\n", "\n", " z = layers.Dense(\n", " latent_dim,\n", " activation=None, # linear for Deep SVDD\n", " use_bias=False,\n", " kernel_regularizer=l2,\n", " name=\"latent\"\n", " )(h)\n", "\n", "\n", " # -------- Decoder --------\n", " d = layers.Dense(\n", " decoder_hidden_dims[0], \n", " use_bias=False,\n", " kernel_regularizer=l2\n", " )(z)\n", " d = act(d)\n", "\n", " d = layers.Dense(\n", " decoder_hidden_dims[1],\n", " use_bias=False,\n", " kernel_regularizer=l2\n", " )(d)\n", " d = act(d)\n", "\n", " x1_out = layers.Dense(\n", " input_dim_mod1,\n", " activation=None,\n", " use_bias=False,\n", " name=\"recon_modality_1\"\n", " )(d)\n", "\n", " x2_out = layers.Dense(\n", " input_dim_mod2,\n", " activation=None,\n", " use_bias=False,\n", " name=\"recon_modality_2\"\n", " )(d)\n", "\n", " model = models.Model(\n", " inputs=[x1_in, x2_in],\n", " outputs=[x1_out, x2_out],\n", " name=\"IntermediateFusionAE_Improved\"\n", " )\n", "\n", " return model\n" ] }, { "cell_type": "code", "execution_count": null, "id": "80cb8eb0", "metadata": {}, "outputs": [], "source": [ "model = build_intermediate_fusion_autoencoder(\n", " input_dim_mod1=len(face_au_cols),\n", " 
input_dim_mod2=len(eye_cols),\n", " encoder_hidden_dim_mod1=12, # TODO: set manually\n", " encoder_hidden_dim_mod2=8, # TODO: set manually\n", " latent_dim=4,\n", " dropout_rate=0.7, # TODO: set manually\n", " neg_slope=0.1,\n", " weight_decay=1e-3\n", ")\n", "\n", "model.compile(\n", " loss={\n", " \"recon_modality_1\": \"mse\",\n", " \"recon_modality_2\": \"mse\",\n", " },\n", " loss_weights={\n", " \"recon_modality_1\": 1.0,\n", " \"recon_modality_2\": 1.0,\n", " },\n", " optimizer=tf.keras.optimizers.Adam(1e-3)\n", " \n", ")\n", "\n", "batch_size_ae=64\n", "# model.summary()" ] }, { "cell_type": "code", "execution_count": null, "id": "95d36a07", "metadata": {}, "outputs": [], "source": [ "model.fit(\n", " x=[X_face, X_eye],\n", " y=[X_face, X_eye],\n", " batch_size=batch_size_ae,\n", " epochs=150,\n", " shuffle=True\n", ")\n", "model.compile(\n", " loss={\n", " \"recon_modality_1\": \"mse\",\n", " \"recon_modality_2\": \"mse\",\n", " },\n", " loss_weights={\n", " \"recon_modality_1\": 1.0,\n", " \"recon_modality_2\": 1.0,\n", " },\n", " optimizer=tf.keras.optimizers.Adam(1e-4),\n", ")\n", "model.fit(\n", " x=[X_face, X_eye],\n", " y=[X_face, X_eye],\n", " batch_size=batch_size_ae,\n", " epochs=100,\n", " shuffle=True\n", ")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "9ccfbc71", "metadata": {}, "outputs": [], "source": [ "encoder = tf.keras.Model(\n", " inputs=model.inputs,\n", " outputs=model.get_layer(\"latent\").output,\n", " name=\"SVDD_Encoder\"\n", ")" ] }, { "cell_type": "markdown", "id": "e4e1b5ff", "metadata": {}, "source": [ "Speichern" ] }, { "cell_type": "code", "execution_count": null, "id": "7e591264", "metadata": {}, "outputs": [], "source": [ "encoder.save(encoder_save_path)" ] }, { "cell_type": "markdown", "id": "372dc754", "metadata": {}, "source": [ "Laden Encoder / Deepsvdd" ] }, { "cell_type": "code", "execution_count": null, "id": "83199fc6", "metadata": {}, "outputs": [], "source": [ "encoder_load_path = 
encoder_save_path\n", "encoder = tf.keras.models.load_model(encoder_load_path)" ] }, { "cell_type": "markdown", "id": "92046112", "metadata": {}, "source": [ "Check, if encoder works" ] }, { "cell_type": "code", "execution_count": null, "id": "db2fa21c", "metadata": {}, "outputs": [], "source": [ "ans= encoder.predict([X_face, X_eye])\n", "print(ans[:6,:])" ] }, { "cell_type": "markdown", "id": "d7bcc35d", "metadata": {}, "source": [ "### Deep SVDD Training" ] }, { "cell_type": "code", "execution_count": null, "id": "806a2479", "metadata": {}, "outputs": [], "source": [ "encoder_load_path = encoder_save_path\n", "deep_svdd_net = tf.keras.models.load_model(encoder_load_path) " ] }, { "cell_type": "code", "execution_count": null, "id": "54083759", "metadata": {}, "outputs": [], "source": [ "def get_center(model, dataset):\n", " center = model.predict(dataset).mean(axis=0)\n", "\n", " eps = 0.1\n", " center[(abs(center) < eps) & (center < 0)] = -eps\n", " center[(abs(center) < eps) & (center >= 0)] = eps\n", "\n", " return center\n", "def dist_per_sample(output, center):\n", " return tf.reduce_sum(tf.square(output - center), axis=-1)\n", "\n", "def score_per_sample(output, center, radius):\n", " return dist_per_sample(output, center) - radius**2\n", "\n", "def train_loss(output, center):\n", " return tf.reduce_mean(dist_per_sample(output, center))" ] }, { "cell_type": "code", "execution_count": null, "id": "fd6f47c0", "metadata": {}, "outputs": [], "source": [ "center = get_center(deep_svdd_net, [X_face, X_eye])" ] }, { "cell_type": "code", "execution_count": null, "id": "b47b52f6", "metadata": {}, "outputs": [], "source": [ "def get_radius_from_arrays(nu, X_face, X_eye):\n", " z = deep_svdd_net.predict([X_face, X_eye])\n", " dists = dist_per_sample(z, center)\n", " return np.quantile(np.sqrt(dists), 1 - nu).astype(np.float32)" ] }, { "cell_type": "code", "execution_count": null, "id": "b062bd19", "metadata": {}, "outputs": [], "source": [ "@tf.function\n", "def 
train_step(batch):\n", "    \"\"\"One Deep SVDD step: pull latent codes toward the fixed center.\"\"\"\n", "    with tf.GradientTape() as grad_tape:\n", "        output = deep_svdd_net(batch, training=True)\n", "        batch_loss = train_loss(output, center)\n", "\n", "    gradients = grad_tape.gradient(batch_loss, deep_svdd_net.trainable_variables)\n", "    optimizer.apply_gradients(zip(gradients, deep_svdd_net.trainable_variables))\n", "\n", "    return batch_loss" ] }, { "cell_type": "code", "execution_count": null, "id": "4c144130", "metadata": {}, "outputs": [], "source": [ "def train(dataset, epochs, nu):\n", "    \"\"\"Minimize the mean squared distance to the SVDD center over `dataset`.\n", "\n", "    Returns the soft-boundary radius, i.e. the (1 - nu) quantile of the\n", "    training-sample distances (nu = expected outlier fraction).\n", "    \"\"\"\n", "    for epoch in range(epochs):\n", "        start = time.time()\n", "        losses = []\n", "        for batch in dataset:\n", "            batch_loss = train_step(batch)\n", "            losses.append(batch_loss)\n", "\n", "        print(f'{epoch+1}/{epochs} epoch: Loss of {np.mean(losses)} ({time.time()-start} secs)')\n", "\n", "    return get_radius_from_arrays(nu, X_face, X_eye)\n", "\n", "\n", "nu = 0.05 # Set nu respectively\n", "\n", "# Shuffle buffer must cover the whole training set: a 64-sample buffer with\n", "# batch size 64 only shuffles locally and batches stay nearly deterministic.\n", "train_dataset = tf.data.Dataset.from_tensor_slices((X_face, X_eye)).shuffle(buffer_size=len(X_face)).batch(64)\n", "\n", "optimizer = tf.keras.optimizers.Adam(1e-3)\n", "train(train_dataset, epochs=150, nu=nu)\n", "\n", "# Fine-tune with a lower learning rate; keep the radius from this final phase.\n", "optimizer.learning_rate = 1e-4\n", "radius = train(train_dataset, 100, nu=nu)" ] }, { "cell_type": "markdown", "id": "24f0cef0", "metadata": {}, "source": [ "prepare valid & test set" ] }, { "cell_type": "code", "execution_count": null, "id": "acb9c8f1", "metadata": {}, "outputs": [], "source": [ "# Test set\n", "X_face_test = test_df_norm[face_au_cols].to_numpy(dtype=np.float32)\n", "X_eye_test = test_df_norm[eye_cols].to_numpy(dtype=np.float32)\n", "y_test = test_df_norm[\"label\"].to_numpy(dtype=np.float32)\n", "\n", "# Validation set\n", "X_face_val = val_df_norm[face_au_cols].to_numpy(dtype=np.float32)\n", "X_eye_val = val_df_norm[eye_cols].to_numpy(dtype=np.float32)\n", "y_val = val_df_norm[\"label\"].to_numpy(dtype=np.float32)" ] }, { "cell_type": "code", "execution_count": null, "id": "49737d5d", "metadata": {}, "outputs": [], "source": [ "valid_scores = 
(score_per_sample(deep_svdd_net.predict([X_face_val, X_eye_val]), center, radius)).numpy()\n", "\n", "valid_fpr, valid_tpr, _ = roc_curve(y_val, valid_scores, pos_label=1)\n", "valid_auc = auc(valid_fpr, valid_tpr)\n", "\n", "plt.figure()\n", "plt.title('Deep SVDD')\n", "plt.plot(valid_fpr, valid_tpr, 'b-')\n", "plt.text(0.5, 0.5, f'AUC: {valid_auc:.4f}')\n", "plt.xlabel('False positive rate')\n", "plt.ylabel('True positive rate')\n", "plt.show()\n", "\n", "valid_predictions = (valid_scores > 0).astype(int)\n", "\n", "normal_acc = np.mean(valid_predictions[y_val == 0] == 0)\n", "anomaly_acc = np.mean(valid_predictions[y_val == 1] == 1)\n", "print(f'Accuracy on Validation set: {accuracy_score(y_val, valid_predictions)}')\n", "print(f'Accuracy for normals: {normal_acc:.4f}')\n", "print(f'Accuracy for anomalies: {anomaly_acc:.4f}')\n", "print(f'F1 on Validation set: {f1_score(y_val, valid_predictions)}')" ] }, { "cell_type": "code", "execution_count": null, "id": "475381db", "metadata": {}, "outputs": [], "source": [ "deep_svdd_net.save(deep_svdd_save_path)" ] }, { "cell_type": "markdown", "id": "6ede1b15", "metadata": {}, "source": [ "### Results" ] }, { "cell_type": "markdown", "id": "c8481d07", "metadata": {}, "source": [ "Validation set" ] }, { "cell_type": "code", "execution_count": null, "id": "719b41b2", "metadata": {}, "outputs": [], "source": [ "valid_predictions = (valid_scores > 0).astype(int)\n", "evaluation_tools.plot_confusion_matrix(true_labels=y_val, predictions=valid_predictions, label_names=[\"low\",\"high\"])\n" ] }, { "cell_type": "markdown", "id": "f33230b1", "metadata": {}, "source": [ "Test set" ] }, { "cell_type": "code", "execution_count": null, "id": "f1189a28", "metadata": {}, "outputs": [], "source": [ "test_scores = (\n", " score_per_sample(\n", " deep_svdd_net.predict([X_face_test, X_eye_test]),\n", " center,\n", " radius\n", " )\n", ").numpy()\n", "\n", "test_predictions = (test_scores > 0).astype(int)\n" ] }, { "cell_type": "code", 
"execution_count": null, "id": "575dddcf", "metadata": {}, "outputs": [], "source": [ "# Report test-set metrics in the same format as the validation cell.\n", "normal_acc = np.mean(test_predictions[y_test == 0] == 0)\n", "anomaly_acc = np.mean(test_predictions[y_test == 1] == 1)\n", "print(f'Accuracy on Test set: {accuracy_score(y_test, test_predictions)}')\n", "print(f'Accuracy for normals: {normal_acc:.4f}')\n", "print(f'Accuracy for anomalies: {anomaly_acc:.4f}')\n", "print(f'F1 on Test set: {f1_score(y_test, test_predictions)}')" ] }, { "cell_type": "code", "execution_count": null, "id": "5acade06", "metadata": {}, "outputs": [], "source": [ "evaluation_tools.plot_confusion_matrix(true_labels=y_test, predictions=test_predictions, label_names=[\"low\",\"high\"])\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" } }, "nbformat": 4, "nbformat_minor": 5 }