{
"cells": [
{
"cell_type": "markdown",
"id": "cf894f6f",
"metadata": {},
"source": [
"# Intermediate Fusion with Deep SVDD"
]
},
{
"cell_type": "markdown",
"id": "494626b1",
"metadata": {},
"source": [
"* Input: joint dataset of eye-tracking and action-unit features at the same sampling rate\n",
"* Processing: intermediate fusion\n",
"* Model: Deep SVDD --> a neural network learns a hypersphere that encloses the normal data (see the objective below)"
]
},
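{
"cell_type": "markdown",
"id": "svdd-objective",
"metadata": {},
"source": [
"For reference, the one-class Deep SVDD objective (Ruff et al., 2018) that the training below approximates: with encoder $\\phi(\\cdot;\\mathcal{W})$ and a fixed center $\\mathbf{c}$ in latent space,\n",
"\n",
"$$\\min_{\\mathcal{W}} \\; \\frac{1}{n} \\sum_{i=1}^{n} \\lVert \\phi(x_i;\\mathcal{W}) - \\mathbf{c} \\rVert^{2} \\;+\\; \\frac{\\lambda}{2} \\sum_{l} \\lVert W^{l} \\rVert_F^{2}.$$\n",
"\n",
"In this notebook the weight-decay term is covered by the L2 kernel regularizers, and the radius is set afterwards as the $(1-\\nu)$-quantile of the training distances."
]
},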
{
"cell_type": "markdown",
"id": "bef91203",
"metadata": {},
"source": [
"### Imports + GPU"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f0b8274a",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"from pathlib import Path\n",
"import sys\n",
"import os\n",
"import time\n",
"base_dir = os.path.abspath(os.path.join(os.getcwd(), \"..\"))\n",
"sys.path.append(base_dir)\n",
"print(base_dir)\n",
"\n",
"from Fahrsimulator_MSY2526_AI.model_training.tools import evaluation_tools, scaler, mad_outlier_removal, performance_split\n",
"from sklearn.preprocessing import StandardScaler, MinMaxScaler\n",
"from sklearn.svm import OneClassSVM\n",
"from sklearn.model_selection import GridSearchCV, KFold, ParameterGrid, train_test_split, GroupKFold\n",
"import matplotlib.pyplot as plt\n",
"import tensorflow as tf\n",
"from tensorflow.keras import layers, models, regularizers\n",
"import pickle\n",
"from sklearn.metrics import (roc_auc_score, accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, classification_report, balanced_accuracy_score, ConfusionMatrixDisplay, auc, roc_curve)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f03c8da9",
"metadata": {},
"outputs": [],
"source": [
"# Check GPU availability\n",
"print(\"TensorFlow version:\", tf.__version__)\n",
"print(\"GPU Available:\", tf.config.list_physical_devices('GPU'))\n",
"print(\"CUDA Available:\", tf.test.is_built_with_cuda())\n",
"\n",
"# Get detailed GPU info\n",
"gpus = tf.config.list_physical_devices('GPU')\n",
"if gpus:\n",
"    print(f\"\\nNumber of GPUs: {len(gpus)}\")\n",
"    for gpu in gpus:\n",
"        print(f\"GPU: {gpu}\")\n",
"\n",
"    # Enable memory growth to prevent TF from allocating all GPU memory\n",
"    try:\n",
"        for gpu in gpus:\n",
"            tf.config.experimental.set_memory_growth(gpu, True)\n",
"        print(\"\\nGPU memory growth enabled\")\n",
"    except RuntimeError as e:\n",
"        print(e)\n",
"else:\n",
"    print(\"\\nNo GPU found - running on CPU\")"
]
},
{
"cell_type": "markdown",
"id": "f00a477c",
"metadata": {},
"source": [
"### Data Preprocessing"
]
},
{
"cell_type": "markdown",
"id": "504c1df7",
"metadata": {},
"source": [
"Loading the data"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6482542b",
"metadata": {},
"outputs": [],
"source": [
"dataset_path = Path(r\"data-paulusjafahrsimulator-gpu/new_datasets/combined_dataset_25hz.parquet\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ce8ab464",
"metadata": {},
"outputs": [],
"source": [
"df = pd.read_parquet(path=dataset_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c2115f65",
"metadata": {},
"outputs": [],
"source": [
"performance_path = Path(r\"/home/jovyan/data-paulusjafahrsimulator-gpu/subject_performance/3new_au_performance.csv\")\n",
"performance_df = pd.read_csv(performance_path)"
]
},
{
"cell_type": "markdown",
"id": "c045c46d",
"metadata": {},
"source": [
"Performance-based split"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1660ec95",
"metadata": {},
"outputs": [],
"source": [
"train_ids, temp_ids, diff1 = performance_split.performance_based_split(\n",
"    subject_ids=df[\"subjectID\"].unique(),\n",
"    performance_df=performance_df,\n",
"    split_ratio=0.6,  # 60% train, 40% temp\n",
"    random_seed=42\n",
")\n",
"\n",
"val_ids, test_ids, diff2 = performance_split.performance_based_split(\n",
"    subject_ids=temp_ids,\n",
"    performance_df=performance_df,\n",
"    split_ratio=0.5,  # 50/50 split of remaining 40%\n",
"    random_seed=43\n",
")\n",
"print(diff1, diff2)"
]
},
{
"cell_type": "markdown",
"id": "195b7283",
"metadata": {},
"source": [
"Labeling"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "05b6b73d",
"metadata": {},
"outputs": [],
"source": [
"low_all = df[\n",
"    ((df[\"PHASE\"] == \"baseline\") |\n",
"     ((df[\"STUDY\"] == \"n-back\") & (df[\"PHASE\"] != \"baseline\") & (df[\"LEVEL\"].isin([1, 4]))))\n",
"]\n",
"print(f\"low all: {low_all.shape}\")\n",
"\n",
"high_nback = df[\n",
"    (df[\"STUDY\"] == \"n-back\") &\n",
"    (df[\"LEVEL\"].isin([2, 3, 5, 6])) &\n",
"    (df[\"PHASE\"].isin([\"train\", \"test\"]))\n",
"]\n",
"print(f\"high n-back: {high_nback.shape}\")\n",
"\n",
"high_kdrive = df[\n",
"    (df[\"STUDY\"] == \"k-drive\") & (df[\"PHASE\"] != \"baseline\")\n",
"]\n",
"print(f\"high k-drive: {high_kdrive.shape}\")\n",
"\n",
"high_all = pd.concat([high_nback, high_kdrive])\n",
"print(f\"high all: {high_all.shape}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "60148c0b",
"metadata": {},
"outputs": [],
"source": [
"low = low_all.copy()\n",
"high = high_all.copy()\n",
"\n",
"low[\"label\"] = 0\n",
"high[\"label\"] = 1\n",
"\n",
"data = pd.concat([low, high], ignore_index=True)\n",
"df = data.drop_duplicates()\n",
"\n",
"print(\"Label distribution:\")\n",
"print(df[\"label\"].value_counts())"
]
},
{
"cell_type": "markdown",
"id": "c8fefca7",
"metadata": {},
"source": [
"Split"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "da6a2f87",
"metadata": {},
"outputs": [],
"source": [
"train_df = df[\n",
"    (df.subjectID.isin(train_ids)) & (df['label'] == 0)\n",
"].copy()\n",
"\n",
"# Validation: balanced sampling of label=0 and label=1\n",
"val_df_full = df[df.subjectID.isin(val_ids)].copy()\n",
"\n",
"# Get all label=0 samples\n",
"val_df_label0 = val_df_full[val_df_full['label'] == 0]\n",
"\n",
"# Sample same number from label=1\n",
"n_samples = len(val_df_label0)\n",
"val_df_label1 = val_df_full[val_df_full['label'] == 1].sample(\n",
"    n=n_samples, random_state=42\n",
")\n",
"\n",
"# Combine\n",
"val_df = pd.concat([val_df_label0, val_df_label1], ignore_index=True)\n",
"test_df = df[df.subjectID.isin(test_ids)]\n",
"print(train_df.shape, val_df.shape, test_df.shape)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e8375760",
"metadata": {},
"outputs": [],
"source": [
"val_df['label'].value_counts()"
]
},
{
"cell_type": "markdown",
"id": "f0570a3c",
"metadata": {},
"source": [
"Normalization"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "acec4a03",
"metadata": {},
"outputs": [],
"source": [
"def fit_normalizer(train_data, au_columns, method='standard', scope='global'):\n",
"    \"\"\"\n",
"    Fit normalization scalers on training data.\n",
"\n",
"    Parameters:\n",
"    -----------\n",
"    train_data : pd.DataFrame\n",
"        Training dataframe with the feature columns and subjectID\n",
"    au_columns : list\n",
"        List of column names to normalize\n",
"    method : str, default='standard'\n",
"        Normalization method: 'standard' for StandardScaler or 'minmax' for MinMaxScaler\n",
"    scope : str, default='global'\n",
"        Normalization scope: 'subject' for per-subject or 'global' for across all subjects\n",
"\n",
"    Returns:\n",
"    --------\n",
"    dict\n",
"        Dictionary containing fitted scalers and statistics for new subjects\n",
"    \"\"\"\n",
"    if method == 'standard':\n",
"        Scaler = StandardScaler\n",
"    elif method == 'minmax':\n",
"        Scaler = MinMaxScaler\n",
"    else:\n",
"        raise ValueError(\"method must be 'standard' or 'minmax'\")\n",
"\n",
"    scalers = {}\n",
"    if scope == 'subject':\n",
"        # Fit one scaler per subject\n",
"        subject_stats = []\n",
"\n",
"        for subject in train_data['subjectID'].unique():\n",
"            subject_mask = train_data['subjectID'] == subject\n",
"            scaler = Scaler()\n",
"            scaler.fit(train_data.loc[subject_mask, au_columns].values)\n",
"            scalers[subject] = scaler\n",
"\n",
"            # Store statistics for averaging\n",
"            if method == 'standard':\n",
"                subject_stats.append({\n",
"                    'mean': scaler.mean_,\n",
"                    'std': scaler.scale_\n",
"                })\n",
"            elif method == 'minmax':\n",
"                subject_stats.append({\n",
"                    'min': scaler.data_min_,\n",
"                    'max': scaler.data_max_\n",
"                })\n",
"\n",
"        # Calculate average statistics for new subjects\n",
"        if method == 'standard':\n",
"            avg_mean = np.mean([s['mean'] for s in subject_stats], axis=0)\n",
"            avg_std = np.mean([s['std'] for s in subject_stats], axis=0)\n",
"            fallback_scaler = StandardScaler()\n",
"            fallback_scaler.mean_ = avg_mean\n",
"            fallback_scaler.scale_ = avg_std\n",
"            fallback_scaler.var_ = avg_std ** 2\n",
"            fallback_scaler.n_features_in_ = len(au_columns)\n",
"        elif method == 'minmax':\n",
"            avg_min = np.mean([s['min'] for s in subject_stats], axis=0)\n",
"            avg_max = np.mean([s['max'] for s in subject_stats], axis=0)\n",
"            fallback_scaler = MinMaxScaler()\n",
"            fallback_scaler.data_min_ = avg_min\n",
"            fallback_scaler.data_max_ = avg_max\n",
"            fallback_scaler.data_range_ = avg_max - avg_min\n",
"            fallback_scaler.scale_ = 1.0 / fallback_scaler.data_range_\n",
"            fallback_scaler.min_ = -avg_min * fallback_scaler.scale_\n",
"            fallback_scaler.n_features_in_ = len(au_columns)\n",
"\n",
"        scalers['_fallback'] = fallback_scaler\n",
"\n",
"    elif scope == 'global':\n",
"        # Fit one scaler for all subjects\n",
"        scaler = Scaler()\n",
"        scaler.fit(train_data[au_columns].values)\n",
"        scalers['global'] = scaler\n",
"\n",
"    else:\n",
"        raise ValueError(\"scope must be 'subject' or 'global'\")\n",
"\n",
"    return {'scalers': scalers, 'method': method, 'scope': scope}\n",
"\n",
"def apply_normalizer(data, columns, normalizer_dict):\n",
"    \"\"\"\n",
"    Apply fitted normalization scalers to data.\n",
"\n",
"    Parameters:\n",
"    -----------\n",
"    data : pd.DataFrame\n",
"        Dataframe with the feature columns and subjectID\n",
"    columns : list\n",
"        List of column names to normalize\n",
"    normalizer_dict : dict\n",
"        Dictionary containing fitted scalers from fit_normalizer()\n",
"\n",
"    Returns:\n",
"    --------\n",
"    pd.DataFrame\n",
"        DataFrame with the specified columns normalized\n",
"    \"\"\"\n",
"    normalized_data = data.copy()\n",
"    scalers = normalizer_dict['scalers']\n",
"    scope = normalizer_dict['scope']\n",
"    normalized_data[columns] = normalized_data[columns].astype(np.float64)\n",
"\n",
"    if scope == 'subject':\n",
"        # Apply per-subject normalization\n",
"        for subject in data['subjectID'].unique():\n",
"            subject_mask = data['subjectID'] == subject\n",
"\n",
"            # Use the subject's scaler if available, otherwise use fallback\n",
"            if subject in scalers:\n",
"                scaler = scalers[subject]\n",
"            else:\n",
"                # Use averaged scaler for new subjects\n",
"                scaler = scalers['_fallback']\n",
"                print(f\"Info: Subject {subject} not in training data. Using averaged scaler from training subjects.\")\n",
"\n",
"            normalized_data.loc[subject_mask, columns] = scaler.transform(\n",
"                data.loc[subject_mask, columns].values\n",
"            )\n",
"\n",
"    elif scope == 'global':\n",
"        # Apply global normalization\n",
"        scaler = scalers['global']\n",
"        normalized_data[columns] = scaler.transform(data[columns].values)\n",
"\n",
"    return normalized_data\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "53c6ee6f",
"metadata": {},
"outputs": [],
"source": [
"def save_normalizer(normalizer_dict, filepath):\n",
"    \"\"\"\n",
"    Save fitted normalizer to disk.\n",
"\n",
"    Parameters:\n",
"    -----------\n",
"    normalizer_dict : dict\n",
"        Dictionary containing fitted scalers from fit_normalizer()\n",
"    filepath : str\n",
"        Path to save the normalizer (e.g., 'normalizer.pkl')\n",
"    \"\"\"\n",
"    # Create directory if it does not exist\n",
"    dirpath = os.path.dirname(filepath)\n",
"    if dirpath:\n",
"        os.makedirs(dirpath, exist_ok=True)\n",
"\n",
"    with open(filepath, 'wb') as f:\n",
"        pickle.dump(normalizer_dict, f)\n",
"\n",
"    print(f\"Normalizer saved to {filepath}\")\n",
"\n",
"def load_normalizer(filepath):\n",
"    \"\"\"\n",
"    Load fitted normalizer from disk.\n",
"\n",
"    Parameters:\n",
"    -----------\n",
"    filepath : str\n",
"        Path to the saved normalizer file\n",
"\n",
"    Returns:\n",
"    --------\n",
"    dict\n",
"        Dictionary containing fitted scalers\n",
"    \"\"\"\n",
"    with open(filepath, 'rb') as f:\n",
"        normalizer_dict = pickle.load(f)\n",
"    print(f\"Normalizer loaded from {filepath}\")\n",
"    return normalizer_dict"
]
},
{
"cell_type": "markdown",
"id": "7280f64f",
"metadata": {},
"source": [
"Save normalizer"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8420afc2",
"metadata": {},
"outputs": [],
"source": [
"normalizer_path = Path('data-paulusjafahrsimulator-gpu/saved_models/deepsvdd_save/normalizer.pkl')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cdd2ba73",
"metadata": {},
"outputs": [],
"source": [
"face_au_cols = [c for c in train_df.columns if c.startswith(\"FACE_AU\")]\n",
"eye_cols = ['Fix_count_short_66_150', 'Fix_count_medium_300_500',\n",
"            'Fix_count_long_gt_1000', 'Fix_count_100', 'Fix_mean_duration',\n",
"            'Fix_median_duration', 'Sac_count', 'Sac_mean_amp', 'Sac_mean_dur',\n",
"            'Sac_median_dur', 'Blink_count', 'Blink_mean_dur', 'Blink_median_dur',\n",
"            'Pupil_mean', 'Pupil_IPA']\n",
"print(len(eye_cols))\n",
"all_signal_columns = face_au_cols + eye_cols\n",
"print(len(all_signal_columns))\n",
"normalizer = fit_normalizer(train_df, all_signal_columns, method='standard', scope='subject')\n",
"save_normalizer(normalizer, normalizer_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "76afc4d3",
"metadata": {},
"outputs": [],
"source": [
"normalizer = load_normalizer(normalizer_path)\n",
"# Apply normalization to all sets\n",
"train_df_norm = apply_normalizer(train_df, all_signal_columns, normalizer)\n",
"val_df_norm = apply_normalizer(val_df, all_signal_columns, normalizer)\n",
"test_df_norm = apply_normalizer(test_df, all_signal_columns, normalizer)"
]
},
{
"cell_type": "markdown",
"id": "77deead9",
"metadata": {},
"source": [
"Outlier removal (later)"
]
},
{
"cell_type": "markdown",
"id": "fd139799",
"metadata": {},
"source": [
"Convert dtypes from pandas to NumPy float32 for Keras"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8587343e",
"metadata": {},
"outputs": [],
"source": [
"X_face = train_df_norm[face_au_cols].to_numpy(dtype=np.float32)\n",
"X_eye = train_df_norm[eye_cols].to_numpy(dtype=np.float32)"
]
},
{
"cell_type": "markdown",
"id": "b736bc58",
"metadata": {},
"source": [
"### Autoencoder Pre-Training"
]
},
{
"cell_type": "markdown",
"id": "aa11faf3",
"metadata": {},
"source": [
"Pre-training of the weights with an autoencoder; loss: MSE"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3eab9d94",
"metadata": {},
"outputs": [],
"source": [
"def build_intermediate_fusion_autoencoder(\n",
"    input_dim_mod1=15,\n",
"    input_dim_mod2=20,\n",
"    encoder_hidden_dim_mod1=12,  # configurable per modality\n",
"    encoder_hidden_dim_mod2=20,  # configurable per modality\n",
"    latent_dim=6,  # change: bottleneck enlarged for a more stable representation\n",
"    dropout_rate=0.4,  # dropout in the hidden layers\n",
"    neg_slope=0.1,\n",
"    weight_decay=1e-4,\n",
"    decoder_hidden_dims=[16, 32]  # change: larger decoder for better reconstruction\n",
"):\n",
"    \"\"\"\n",
"    Improved intermediate-fusion autoencoder for Deep SVDD.\n",
"    Changes:\n",
"    - enlarged bottleneck (latent_dim)\n",
"    - dropout only in the hidden layers, not in the bottleneck\n",
"    - larger decoder for more stable pretraining\n",
"    - configurable hidden dimensions for the encoders\n",
"    \"\"\"\n",
"\n",
"    l2 = regularizers.l2(weight_decay)\n",
"    act = layers.LeakyReLU(negative_slope=neg_slope)\n",
"\n",
"    # -------- Inputs --------\n",
"    x1_in = layers.Input(shape=(input_dim_mod1,), name=\"modality_1\")\n",
"    x2_in = layers.Input(shape=(input_dim_mod2,), name=\"modality_2\")\n",
"\n",
"    # -------- Encoder 1 --------\n",
"    e1 = layers.Dense(\n",
"        encoder_hidden_dim_mod1,\n",
"        use_bias=False,\n",
"        kernel_regularizer=l2\n",
"    )(x1_in)\n",
"    e1 = act(e1)\n",
"    e1 = layers.Dropout(dropout_rate)(e1)  # dropout only here\n",
"\n",
"    e1 = layers.Dense(\n",
"        16,  # change: larger hidden layer for a more stable fusion\n",
"        use_bias=False,\n",
"        kernel_regularizer=l2\n",
"    )(e1)\n",
"    e1 = act(e1)\n",
"\n",
"    # -------- Encoder 2 --------\n",
"    e2 = layers.Dense(\n",
"        encoder_hidden_dim_mod2,\n",
"        use_bias=False,\n",
"        kernel_regularizer=l2\n",
"    )(x2_in)\n",
"    e2 = act(e2)\n",
"    e2 = layers.Dropout(dropout_rate)(e2)  # dropout only here\n",
"\n",
"    e2 = layers.Dense(\n",
"        16,  # change: larger hidden layer\n",
"        use_bias=False,\n",
"        kernel_regularizer=l2\n",
"    )(e2)\n",
"    e2 = act(e2)\n",
"\n",
"    # -------- Intermediate Fusion --------\n",
"    fused = layers.Concatenate(name=\"fusion\")([e1, e2])  # 16+16=32 dimensions\n",
"\n",
"    # -------- Joint Encoder / Bottleneck --------\n",
"    # deliberately smaller than the fusion layer\n",
"    h = layers.Dense(\n",
"        latent_dim,\n",
"        use_bias=False,\n",
"        kernel_regularizer=l2\n",
"    )(fused)\n",
"    h = act(h)\n",
"    h = layers.Dropout(dropout_rate)(h)\n",
"\n",
"    z = layers.Dense(\n",
"        latent_dim,\n",
"        activation=None,  # linear, for Deep SVDD\n",
"        use_bias=False,\n",
"        kernel_regularizer=l2,\n",
"        name=\"latent\"\n",
"    )(h)\n",
"    # no dropout directly before the bottleneck\n",
"\n",
"    # -------- Decoder --------\n",
"    d = layers.Dense(\n",
"        decoder_hidden_dims[0],  # larger decoder\n",
"        use_bias=False,\n",
"        kernel_regularizer=l2\n",
"    )(z)\n",
"    d = act(d)\n",
"\n",
"    d = layers.Dense(\n",
"        decoder_hidden_dims[1],\n",
"        use_bias=False,\n",
"        kernel_regularizer=l2\n",
"    )(d)\n",
"    d = act(d)\n",
"\n",
"    x1_out = layers.Dense(\n",
"        input_dim_mod1,\n",
"        activation=None,\n",
"        use_bias=False,\n",
"        name=\"recon_modality_1\"\n",
"    )(d)\n",
"\n",
"    x2_out = layers.Dense(\n",
"        input_dim_mod2,\n",
"        activation=None,\n",
"        use_bias=False,\n",
"        name=\"recon_modality_2\"\n",
"    )(d)\n",
"\n",
"    model = models.Model(\n",
"        inputs=[x1_in, x2_in],\n",
"        outputs=[x1_out, x2_out],\n",
"        name=\"IntermediateFusionAE_Improved\"\n",
"    )\n",
"\n",
"    return model\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "80cb8eb0",
"metadata": {},
"outputs": [],
"source": [
"model = build_intermediate_fusion_autoencoder(\n",
"    input_dim_mod1=len(face_au_cols),\n",
"    input_dim_mod2=len(eye_cols),\n",
"    encoder_hidden_dim_mod1=15,  # per modality\n",
"    encoder_hidden_dim_mod2=10,  # per modality\n",
"    latent_dim=8,\n",
"    dropout_rate=0.3,  # tunable\n",
"    neg_slope=0.1,\n",
"    weight_decay=1e-3\n",
")\n",
"\n",
"model.compile(\n",
"    loss={\n",
"        \"recon_modality_1\": \"mse\",\n",
"        \"recon_modality_2\": \"mse\",\n",
"    },\n",
"    loss_weights={\n",
"        \"recon_modality_1\": 1.0,\n",
"        \"recon_modality_2\": 1.0,\n",
"    },\n",
"    optimizer=tf.keras.optimizers.Adam(1e-2)\n",
")\n",
"\n",
"batch_size_ae = 64\n",
"# model.summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "95d36a07",
"metadata": {},
"outputs": [],
"source": [
"# First training stage with a higher learning rate\n",
"model.fit(\n",
"    x=[X_face, X_eye],\n",
"    y=[X_face, X_eye],\n",
"    batch_size=batch_size_ae,\n",
"    epochs=150,\n",
"    shuffle=True\n",
")\n",
"# Second stage: recompile with a lower learning rate and fine-tune\n",
"model.compile(\n",
"    loss={\n",
"        \"recon_modality_1\": \"mse\",\n",
"        \"recon_modality_2\": \"mse\",\n",
"    },\n",
"    loss_weights={\n",
"        \"recon_modality_1\": 1.0,\n",
"        \"recon_modality_2\": 1.0,\n",
"    },\n",
"    optimizer=tf.keras.optimizers.Adam(1e-5),\n",
")\n",
"model.fit(\n",
"    x=[X_face, X_eye],\n",
"    y=[X_face, X_eye],\n",
"    batch_size=batch_size_ae,\n",
"    epochs=100,\n",
"    shuffle=True\n",
")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9ccfbc71",
"metadata": {},
"outputs": [],
"source": [
"encoder = tf.keras.Model(\n",
"    inputs=model.inputs,\n",
"    outputs=model.get_layer(\"latent\").output,\n",
"    name=\"SVDD_Encoder\"\n",
")"
]
},
{
"cell_type": "markdown",
"id": "e4e1b5ff",
"metadata": {},
"source": [
"Save"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7e591264",
"metadata": {},
"outputs": [],
"source": [
"encoder_save_path = Path('data-paulusjafahrsimulator-gpu/saved_models/deepsvdd_save/encoder_6_deep.keras')\n",
"encoder.save(encoder_save_path)"
]
},
{
"cell_type": "markdown",
"id": "372dc754",
"metadata": {},
"source": [
"Load encoder / Deep SVDD"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "83199fc6",
"metadata": {},
"outputs": [],
"source": [
"encoder_load_path = encoder_save_path\n",
"encoder = tf.keras.models.load_model(encoder_load_path)"
]
},
{
"cell_type": "markdown",
"id": "92046112",
"metadata": {},
"source": [
"Check if the encoder works"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "db2fa21c",
"metadata": {},
"outputs": [],
"source": [
"ans = encoder.predict([X_face, X_eye])\n",
"print(ans[:6, :])"
]
},
{
"cell_type": "markdown",
"id": "d7bcc35d",
"metadata": {},
"source": [
"### Deep SVDD Training"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "806a2479",
"metadata": {},
"outputs": [],
"source": [
"encoder_load_path = encoder_save_path\n",
"deep_svdd_net = tf.keras.models.load_model(encoder_load_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "54083759",
"metadata": {},
"outputs": [],
"source": [
"def get_center(model, dataset):\n",
"    center = model.predict(dataset).mean(axis=0)\n",
"\n",
"    # Push near-zero components away from zero to avoid a trivial center\n",
"    eps = 0.1\n",
"    center[(abs(center) < eps) & (center < 0)] = -eps\n",
"    center[(abs(center) < eps) & (center >= 0)] = eps\n",
"\n",
"    return center\n",
"\n",
"def dist_per_sample(output, center):\n",
"    return tf.reduce_sum(tf.square(output - center), axis=-1)\n",
"\n",
"def score_per_sample(output, center, radius):\n",
"    return dist_per_sample(output, center) - radius**2\n",
"\n",
"def train_loss(output, center):\n",
"    return tf.reduce_mean(dist_per_sample(output, center))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fd6f47c0",
"metadata": {},
"outputs": [],
"source": [
"center = get_center(deep_svdd_net, [X_face, X_eye])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "da140072",
"metadata": {},
"outputs": [],
"source": [
"# def get_radius(nu, dataset):\n",
"#     x_face, x_eye = dataset  # <-- must be unpacked\n",
"\n",
"#     dataset_tuple = [x_face, x_eye]\n",
"\n",
"#     dists = dist_per_sample(deep_svdd_net.predict(dataset_tuple), center)\n",
"#     return np.quantile(np.sqrt(dists), 1-nu).astype(np.float32)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b47b52f6",
"metadata": {},
"outputs": [],
"source": [
"def get_radius_from_arrays(nu, X_face, X_eye):\n",
"    z = deep_svdd_net.predict([X_face, X_eye])\n",
"    dists = dist_per_sample(z, center)\n",
"    return np.quantile(np.sqrt(dists), 1 - nu).astype(np.float32)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b062bd19",
"metadata": {},
"outputs": [],
"source": [
"@tf.function\n",
"def train_step(batch):\n",
"    with tf.GradientTape() as grad_tape:\n",
"        output = deep_svdd_net(batch, training=True)\n",
"        batch_loss = train_loss(output, center)\n",
"\n",
"    gradients = grad_tape.gradient(batch_loss, deep_svdd_net.trainable_variables)\n",
"    optimizer.apply_gradients(zip(gradients, deep_svdd_net.trainable_variables))\n",
"\n",
"    return batch_loss"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4c144130",
"metadata": {},
"outputs": [],
"source": [
"def train(dataset, epochs, nu):\n",
"    for epoch in range(epochs):\n",
"        start = time.time()\n",
"        losses = []\n",
"        for batch in dataset:\n",
"            batch_loss = train_step(batch)\n",
"            losses.append(batch_loss)\n",
"\n",
"        print(f'{epoch+1}/{epochs} epoch: Loss of {np.mean(losses)} ({time.time()-start} secs)')\n",
"\n",
"    return get_radius_from_arrays(nu, X_face, X_eye)\n",
"\n",
"\n",
"nu = 0.05\n",
"\n",
"train_dataset = tf.data.Dataset.from_tensor_slices((X_face, X_eye)).shuffle(64).batch(64)\n",
"# train_dataset = tf.data.Dataset.from_tensor_slices((X_face, X_eye))\n",
"\n",
"optimizer = tf.keras.optimizers.Adam(1e-3)\n",
"train(train_dataset, epochs=150, nu=nu)\n",
"\n",
"optimizer.learning_rate = 1e-4\n",
"radius = train(train_dataset, 100, nu=nu)"
]
},
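{
"cell_type": "markdown",
"id": "radius-sanity-check",
"metadata": {},
"source": [
"Optional sanity check (a minimal sketch using only the objects defined above): because the radius is the $(1-\\nu)$-quantile of the training distances, roughly a fraction $\\nu$ of the training samples should be scored as anomalous."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "radius-sanity-check-code",
"metadata": {},
"outputs": [],
"source": [
"# Sketch: the fraction of training samples outside the learned sphere should be close to nu\n",
"train_scores = score_per_sample(deep_svdd_net.predict([X_face, X_eye]), center, radius).numpy()\n",
"print(f\"Fraction of training samples scored as anomalous: {(train_scores > 0).mean():.3f} (target ~ {nu})\")"
]
},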
{
"cell_type": "markdown",
"id": "24f0cef0",
"metadata": {},
"source": [
"Prepare validation & test sets"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "acb9c8f1",
"metadata": {},
"outputs": [],
"source": [
"# Test set\n",
"X_face_test = test_df_norm[face_au_cols].to_numpy(dtype=np.float32)\n",
"X_eye_test = test_df_norm[eye_cols].to_numpy(dtype=np.float32)\n",
"y_test = test_df_norm[\"label\"].to_numpy(dtype=np.float32)\n",
"\n",
"# Validation set\n",
"X_face_val = val_df_norm[face_au_cols].to_numpy(dtype=np.float32)\n",
"X_eye_val = val_df_norm[eye_cols].to_numpy(dtype=np.float32)\n",
"y_val = val_df_norm[\"label\"].to_numpy(dtype=np.float32)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "49737d5d",
"metadata": {},
"outputs": [],
"source": [
"valid_scores = (score_per_sample(deep_svdd_net.predict([X_face_val, X_eye_val]), center, radius)).numpy()\n",
"\n",
"valid_fpr, valid_tpr, _ = roc_curve(y_val, valid_scores, pos_label=1)\n",
"valid_auc = auc(valid_fpr, valid_tpr)\n",
"\n",
"plt.figure()\n",
"plt.title('Deep SVDD')\n",
"plt.plot(valid_fpr, valid_tpr, 'b-')\n",
"plt.text(0.5, 0.5, f'AUC: {valid_auc:.4f}')\n",
"plt.xlabel('False positive rate')\n",
"plt.ylabel('True positive rate')\n",
"plt.show()\n",
"\n",
"valid_predictions = (valid_scores > 0).astype(int)\n",
"\n",
"normal_acc = np.mean(valid_predictions[y_val == 0] == 0)\n",
"anomaly_acc = np.mean(valid_predictions[y_val == 1] == 1)\n",
"print(f'Accuracy on Validation set: {accuracy_score(y_val, valid_predictions)}')\n",
"print(f'Accuracy for normals: {normal_acc:.4f}')\n",
"print(f'Accuracy for anomalies: {anomaly_acc:.4f}')\n",
"print(f'F1 on Validation set: {f1_score(y_val, valid_predictions)}')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "475381db",
"metadata": {},
"outputs": [],
"source": [
"deep_svdd_save_path = Path('data-paulusjafahrsimulator-gpu/saved_models/deepsvdd_save/deep_svdd_05.keras')\n",
"deep_svdd_net.save(deep_svdd_save_path)"
]
},
{
"cell_type": "markdown",
"id": "6ede1b15",
"metadata": {},
"source": [
"### Results"
]
},
{
"cell_type": "markdown",
"id": "c8481d07",
"metadata": {},
"source": [
"Validation set"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "719b41b2",
"metadata": {},
"outputs": [],
"source": [
"valid_predictions = (valid_scores > 0).astype(int)\n",
"evaluation_tools.plot_confusion_matrix(true_labels=y_val, predictions=valid_predictions, label_names=[\"low\", \"high\"])\n"
]
},
{
"cell_type": "markdown",
"id": "f33230b1",
"metadata": {},
"source": [
"Test set"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f1189a28",
"metadata": {},
"outputs": [],
"source": [
"test_scores = (\n",
"    score_per_sample(\n",
"        deep_svdd_net.predict([X_face_test, X_eye_test]),\n",
"        center,\n",
"        radius\n",
"    )\n",
").numpy()\n",
"\n",
"test_predictions = (test_scores > 0).astype(int)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5acade06",
"metadata": {},
"outputs": [],
"source": [
"evaluation_tools.plot_confusion_matrix(true_labels=y_test, predictions=test_predictions, label_names=[\"low\", \"high\"])\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}