{ "cells": [ { "cell_type": "markdown", "id": "47f6de7b", "metadata": {}, "source": [ "Bibliotheken importieren" ] }, { "cell_type": "code", "execution_count": 1, "id": "99294260", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2026-02-18 15:51:45.686247: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "2026-02-18 15:51:45.789201: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "2026-02-18 15:51:45.819528: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", "2026-02-18 15:51:46.036932: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n", "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", "2026-02-18 15:51:47.239360: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n" ] } ], "source": [ "import pandas as pd \n", "import numpy as np \n", "import matplotlib.pyplot as plt\n", "import random \n", "import joblib \n", "from pathlib import Path \n", "\n", "\n", "from sklearn.model_selection import GroupKFold \n", "from sklearn.preprocessing import StandardScaler \n", "\n", "import tensorflow as tf \n", "from tensorflow.keras import Input, layers, models, regularizers" ] }, { "cell_type": "markdown", "id": "52b4ca8c", "metadata": {}, "source": [ "Seed festlegen" ] }, { "cell_type": "code", "execution_count": 2, "id": "6e49d281", "metadata": {}, "outputs": [], "source": [ "SEED = 42 \n", "np.random.seed(SEED) \n", "tf.random.set_seed(SEED) \n", "random.seed(SEED)" ] }, { "cell_type": "markdown", "id": "ae1a715f", "metadata": {}, "source": [ "Daten laden" ] }, { "cell_type": "code", "execution_count": 7, "id": "870f01c3", "metadata": {}, "outputs": [], "source": [ "data_path = Path(r\"~/data-paulusjafahrsimulator-gpu/new_datasets/50s_25Hz_dataset.parquet\") \n", "\n", "data = pd.read_parquet(path=data_path)" ] }, { "cell_type": "markdown", "id": "bedbc23b", "metadata": {}, "source": [ "Labels erstellen" ] }, { "cell_type": "code", "execution_count": 8, "id": "38848515", "metadata": {}, "outputs": [], "source": [ "low_all = data[((data[\"PHASE\"] == \"baseline\") | \n", " ((data[\"STUDY\"] == \"n-back\") & (data[\"PHASE\"] != \"baseline\") & (data[\"LEVEL\"].isin([1,4]))))].copy() \n", "\n", "high_all = pd.concat([ \n", " data[(data[\"STUDY\"]==\"n-back\") & (data[\"LEVEL\"].isin([2,3,5,6])) & (data[\"PHASE\"].isin([\"train\",\"test\"]))], \n", " data[(data[\"STUDY\"]==\"k-drive\") & (data[\"PHASE\"]!=\"baseline\")] \n", "]).copy() \n", "\n", "low_all[\"label\"] = 0 \n", "high_all[\"label\"] = 1 \n", "data = pd.concat([low_all, high_all], ignore_index=True).drop_duplicates() " ] }, { "cell_type": "markdown", "id": "0b282acf", "metadata": {}, "source": [ "Features und Labels" ] }, { "cell_type": "code", "execution_count": null, "id": "5edb00a0", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['subjectID', 'start_time', 'STUDY', 'LEVEL', 'PHASE', 'FACE_AU01_mean', 'FACE_AU02_mean', 'FACE_AU04_mean', 'FACE_AU05_mean', 'FACE_AU06_mean', 'FACE_AU07_mean', 'FACE_AU09_mean', 'FACE_AU10_mean', 'FACE_AU11_mean', 'FACE_AU12_mean', 'FACE_AU14_mean', 'FACE_AU15_mean', 'FACE_AU17_mean', 'FACE_AU20_mean', 'FACE_AU23_mean', 'FACE_AU24_mean', 'FACE_AU25_mean', 'FACE_AU26_mean', 'FACE_AU28_mean', 'FACE_AU43_mean', 'Fix_count_short_66_150', 'Fix_count_medium_300_500', 'Fix_count_long_gt_1000', 'Fix_count_100', 'Fix_mean_duration', 'Fix_median_duration', 'Sac_count', 'Sac_mean_amp', 'Sac_mean_dur', 'Sac_median_dur', 'Blink_count', 'Blink_mean_dur', 'Blink_median_dur', 'Pupil_mean', 'Pupil_IPA', 'label']\n", "Gefundene FACE_AU-Spalten: ['FACE_AU01_mean', 'FACE_AU02_mean', 'FACE_AU04_mean', 'FACE_AU05_mean', 'FACE_AU06_mean', 'FACE_AU07_mean', 'FACE_AU09_mean', 'FACE_AU10_mean', 'FACE_AU11_mean', 'FACE_AU12_mean', 'FACE_AU14_mean', 'FACE_AU15_mean', 'FACE_AU17_mean', 'FACE_AU20_mean', 'FACE_AU23_mean', 'FACE_AU24_mean', 'FACE_AU25_mean', 'FACE_AU26_mean', 'FACE_AU28_mean', 'FACE_AU43_mean']\n" ] } ], "source": [ "#Face AUs\n", "au_columns = [col for col in data.columns if \"face\" in col.lower()] \n", "\n", "#Eye Features\n", "eye_columns = [ \n", " 'Fix_count_short_66_150', \n", " 'Fix_count_medium_300_500', \n", " 'Fix_count_long_gt_1000', \n", " 'Fix_count_100', \n", " 'Fix_mean_duration', \n", " 'Fix_median_duration', \n", " 'Sac_count', \n", " 'Sac_mean_amp', \n", " 'Sac_mean_dur', \n", " 'Sac_median_dur', \n", " 'Blink_count', \n", " 'Blink_mean_dur', \n", " 'Blink_median_dur', \n", " 'Pupil_mean', \n", " 'Pupil_IPA' \n", "]\n", "X = data[au_columns].values[..., np.newaxis] \n", "y = data[\"label\"].values \n", "groups = data[\"subjectID\"].values\n", "print(data.columns.tolist())\n", "\n", "print(\"Gefundene FACE_AU-Spalten:\", au_columns)" ] }, { "cell_type": "markdown", "id": "a539b83b", "metadata": {}, "source": [ "CNN-Modell" ] }, { "cell_type": "code", "execution_count": 33, "id": "e4a7f496", "metadata": {}, "outputs": [], "source": [ "def build_model(input_shape, lr=1e-4): \n", " model = models.Sequential([ \n", " Input(shape=input_shape), \n", " layers.Conv1D(32, kernel_size=3, activation=\"relu\", kernel_regularizer=regularizers.l2(0.001)), \n", " layers.BatchNormalization(), \n", " layers.MaxPooling1D(pool_size=2),\n", "\n", " layers.Conv1D(64, kernel_size=3, activation=\"relu\", kernel_regularizer=regularizers.l2(0.001)), \n", " layers.BatchNormalization(), \n", " layers.GlobalAveragePooling1D(), \n", " \n", " layers.Dense(32, activation=\"relu\", kernel_regularizer=regularizers.l2(0.001)), \n", " layers.Dropout(0.5), \n", " layers.Dense(1, activation=\"sigmoid\") \n", " ]) \n", " \n", " model.compile( \n", " optimizer=tf.keras.optimizers.Adam(learning_rate=lr), \n", " loss=\"binary_crossentropy\", \n", " metrics=[\"accuracy\", tf.keras.metrics.AUC(name=\"auc\")] \n", " ) \n", " return model" ] }, { "cell_type": "markdown", "id": "5905871b", "metadata": {}, "source": [ "Cross-Validation" ] }, { "cell_type": "code", "execution_count": null, "id": "90658000", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "--- Fold 1 ---\n", "Train-Subjects: [ 0 3 4 7 9 12 13 14 16 18 20 21 22 26 27 28]\n", "Val-Subjects: [ 2 5 8 17]\n", "Train Mittelwerte (erste 5 Features): [[-9.48333057e-17]\n", " [ 3.78136535e-17]\n", " [-3.63102089e-16]\n", " [-4.05458101e-16]\n", " [-7.46497309e-16]]\n", "Train Std (erste 5 Features): [[1.]\n", " [1.]\n", " [1.]\n", " [1.]\n", " [1.]]\n", "Val Mittelwerte (erste 5 Features): [[ 0.01837255]\n", " [ 0.03634784]\n", " [-0.01417256]\n", " [-0.00136129]\n", " [ 0.01066093]]\n", "Val Std (erste 5 Features): [[0.9809553 ]\n", " [0.90597105]\n", " [1.0661958 ]\n", " [0.97687653]\n", " [1.06061798]]\n" ] }, { "data": { "text/html": [ "
Model: \"sequential_7\"\n",
"\n"
],
"text/plain": [
"\u001b[1mModel: \"sequential_7\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃ Layer (type) ┃ Output Shape ┃ Param # ┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_14 (Conv1D) │ (None, 18, 32) │ 128 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_14 │ (None, 18, 32) │ 128 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_7 (MaxPooling1D) │ (None, 9, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_15 (Conv1D) │ (None, 7, 64) │ 6,208 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_15 │ (None, 7, 64) │ 256 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_7 │ (None, 64) │ 0 │\n",
"│ (GlobalAveragePooling1D) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_14 (Dense) │ (None, 32) │ 2,080 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_7 (Dropout) │ (None, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_15 (Dense) │ (None, 1) │ 33 │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
"\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_14 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_14 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_7 (\u001b[38;5;33mMaxPooling1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m9\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_15 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m6,208\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_15 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m256\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_7 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"│ (\u001b[38;5;33mGlobalAveragePooling1D\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_14 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m2,080\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_7 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_15 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m1\u001b[0m) │ \u001b[38;5;34m33\u001b[0m │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total params: 8,833 (34.50 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m8,833\u001b[0m (34.50 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Trainable params: 8,641 (33.75 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m8,641\u001b[0m (33.75 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Non-trainable params: 192 (768.00 B)\n", "\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m192\u001b[0m (768.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Fold 1 - Val Loss: 0.2336, Val Acc: 0.9435, Val AUC: 0.9757\n", "\n", "--- Fold 2 ---\n", "Train-Subjects: [ 0 2 3 4 5 7 8 9 16 17 18 20 21 22 26 28]\n", "Val-Subjects: [12 13 14 27]\n", "Train Mittelwerte (erste 5 Features): [[-1.44302026e-16]\n", " [ 8.74930579e-17]\n", " [ 1.50201191e-16]\n", " [ 6.48301479e-16]\n", " [ 6.66605748e-17]]\n", "Train Std (erste 5 Features): [[1.]\n", " [1.]\n", " [1.]\n", " [1.]\n", " [1.]]\n", "Val Mittelwerte (erste 5 Features): [[0.12995691]\n", " [0.06355549]\n", " [0.02275319]\n", " [0.06616701]\n", " [0.06788641]]\n", "Val Std (erste 5 Features): [[0.93313441]\n", " [1.00300875]\n", " [0.99231989]\n", " [1.03627391]\n", " [1.088837 ]]\n" ] }, { "data": { "text/html": [ "
Model: \"sequential_8\"\n",
"\n"
],
"text/plain": [
"\u001b[1mModel: \"sequential_8\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃ Layer (type) ┃ Output Shape ┃ Param # ┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_16 (Conv1D) │ (None, 18, 32) │ 128 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_16 │ (None, 18, 32) │ 128 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_8 (MaxPooling1D) │ (None, 9, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_17 (Conv1D) │ (None, 7, 64) │ 6,208 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_17 │ (None, 7, 64) │ 256 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_8 │ (None, 64) │ 0 │\n",
"│ (GlobalAveragePooling1D) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_16 (Dense) │ (None, 32) │ 2,080 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_8 (Dropout) │ (None, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_17 (Dense) │ (None, 1) │ 33 │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
"\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_16 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_16 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_8 (\u001b[38;5;33mMaxPooling1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m9\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_17 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m6,208\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_17 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m256\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_8 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"│ (\u001b[38;5;33mGlobalAveragePooling1D\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_16 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m2,080\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_8 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_17 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m1\u001b[0m) │ \u001b[38;5;34m33\u001b[0m │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total params: 8,833 (34.50 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m8,833\u001b[0m (34.50 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Trainable params: 8,641 (33.75 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m8,641\u001b[0m (33.75 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Non-trainable params: 192 (768.00 B)\n", "\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m192\u001b[0m (768.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Fold 2 - Val Loss: 0.2085, Val Acc: 0.9514, Val AUC: 0.9803\n", "\n", "--- Fold 3 ---\n", "Train-Subjects: [ 0 2 3 5 8 9 12 13 14 17 18 20 22 26 27 28]\n", "Val-Subjects: [ 4 7 16 21]\n", "Train Mittelwerte (erste 5 Features): [[-1.70932546e-16]\n", " [ 8.53862129e-17]\n", " [ 2.40045488e-16]\n", " [ 1.14228078e-15]\n", " [-9.88531659e-17]]\n", "Train Std (erste 5 Features): [[1.]\n", " [1.]\n", " [1.]\n", " [1.]\n", " [1.]]\n", "Val Mittelwerte (erste 5 Features): [[-0.16879516]\n", " [-0.09483067]\n", " [-0.00774855]\n", " [-0.02642025]\n", " [-0.01273985]]\n", "Val Std (erste 5 Features): [[1.11007971]\n", " [1.03956086]\n", " [0.95289305]\n", " [0.97649474]\n", " [0.98732466]]\n" ] }, { "data": { "text/html": [ "
Model: \"sequential_9\"\n",
"\n"
],
"text/plain": [
"\u001b[1mModel: \"sequential_9\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃ Layer (type) ┃ Output Shape ┃ Param # ┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_18 (Conv1D) │ (None, 18, 32) │ 128 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_18 │ (None, 18, 32) │ 128 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_9 (MaxPooling1D) │ (None, 9, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_19 (Conv1D) │ (None, 7, 64) │ 6,208 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_19 │ (None, 7, 64) │ 256 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_9 │ (None, 64) │ 0 │\n",
"│ (GlobalAveragePooling1D) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_18 (Dense) │ (None, 32) │ 2,080 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_9 (Dropout) │ (None, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_19 (Dense) │ (None, 1) │ 33 │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
"\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_18 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_18 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_9 (\u001b[38;5;33mMaxPooling1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m9\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_19 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m6,208\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_19 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m256\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_9 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"│ (\u001b[38;5;33mGlobalAveragePooling1D\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_18 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m2,080\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_9 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_19 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m1\u001b[0m) │ \u001b[38;5;34m33\u001b[0m │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total params: 8,833 (34.50 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m8,833\u001b[0m (34.50 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Trainable params: 8,641 (33.75 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m8,641\u001b[0m (33.75 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Non-trainable params: 192 (768.00 B)\n", "\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m192\u001b[0m (768.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Fold 3 - Val Loss: 0.2030, Val Acc: 0.9478, Val AUC: 0.9797\n", "\n", "--- Fold 4 ---\n", "Train-Subjects: [ 2 3 4 5 7 8 12 13 14 16 17 18 20 21 26 27]\n", "Val-Subjects: [ 0 9 22 28]\n", "Train Mittelwerte (erste 5 Features): [[-6.20161010e-18]\n", " [-2.54131196e-17]\n", " [ 3.29511093e-16]\n", " [ 5.47831361e-16]\n", " [-1.38162773e-16]]\n", "Train Std (erste 5 Features): [[1.]\n", " [1.]\n", " [1.]\n", " [1.]\n", " [1.]]\n", "Val Mittelwerte (erste 5 Features): [[ 0.01788626]\n", " [ 0.0311636 ]\n", " [-0.04004633]\n", " [-0.01924637]\n", " [ 0.01714694]]\n", "Val Std (erste 5 Features): [[0.98096724]\n", " [1.05932267]\n", " [0.95190592]\n", " [1.01373334]\n", " [0.99555169]]\n" ] }, { "data": { "text/html": [ "
Model: \"sequential_10\"\n",
"\n"
],
"text/plain": [
"\u001b[1mModel: \"sequential_10\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃ Layer (type) ┃ Output Shape ┃ Param # ┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_20 (Conv1D) │ (None, 18, 32) │ 128 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_20 │ (None, 18, 32) │ 128 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_10 (MaxPooling1D) │ (None, 9, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_21 (Conv1D) │ (None, 7, 64) │ 6,208 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_21 │ (None, 7, 64) │ 256 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_10 │ (None, 64) │ 0 │\n",
"│ (GlobalAveragePooling1D) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_20 (Dense) │ (None, 32) │ 2,080 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_10 (Dropout) │ (None, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_21 (Dense) │ (None, 1) │ 33 │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
"\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_20 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_20 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_10 (\u001b[38;5;33mMaxPooling1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m9\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_21 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m6,208\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_21 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m256\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_10 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"│ (\u001b[38;5;33mGlobalAveragePooling1D\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_20 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m2,080\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_10 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_21 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m1\u001b[0m) │ \u001b[38;5;34m33\u001b[0m │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total params: 8,833 (34.50 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m8,833\u001b[0m (34.50 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Trainable params: 8,641 (33.75 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m8,641\u001b[0m (33.75 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Non-trainable params: 192 (768.00 B)\n", "\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m192\u001b[0m (768.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Fold 4 - Val Loss: 0.2266, Val Acc: 0.9356, Val AUC: 0.9743\n", "\n", "--- Fold 5 ---\n", "Train-Subjects: [ 0 2 4 5 7 8 9 12 13 14 16 17 21 22 27 28]\n", "Val-Subjects: [ 3 18 20 26]\n", "Train Mittelwerte (erste 5 Features): [[-9.87688921e-18]\n", " [ 1.07120426e-16]\n", " [-9.48417331e-17]\n", " [ 1.12171797e-15]\n", " [-3.25397992e-16]]\n", "Train Std (erste 5 Features): [[1.]\n", " [1.]\n", " [1.]\n", " [1.]\n", " [1.]]\n", "Val Mittelwerte (erste 5 Features): [[-0.00321515]\n", " [-0.03717355]\n", " [ 0.03970031]\n", " [-0.01854768]\n", " [-0.07944637]]\n", "Val Std (erste 5 Features): [[0.98009046]\n", " [0.98567844]\n", " [1.03448059]\n", " [0.99467548]\n", " [0.86050472]]\n" ] }, { "data": { "text/html": [ "
Model: \"sequential_11\"\n",
"\n"
],
"text/plain": [
"\u001b[1mModel: \"sequential_11\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃ Layer (type) ┃ Output Shape ┃ Param # ┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_22 (Conv1D) │ (None, 18, 32) │ 128 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_22 │ (None, 18, 32) │ 128 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_11 (MaxPooling1D) │ (None, 9, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_23 (Conv1D) │ (None, 7, 64) │ 6,208 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_23 │ (None, 7, 64) │ 256 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_11 │ (None, 64) │ 0 │\n",
"│ (GlobalAveragePooling1D) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_22 (Dense) │ (None, 32) │ 2,080 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_11 (Dropout) │ (None, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_23 (Dense) │ (None, 1) │ 33 │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
"\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_22 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_22 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_11 (\u001b[38;5;33mMaxPooling1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m9\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_23 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m6,208\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_23 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m256\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_11 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"│ (\u001b[38;5;33mGlobalAveragePooling1D\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_22 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m2,080\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_11 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_23 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m1\u001b[0m) │ \u001b[38;5;34m33\u001b[0m │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total params: 8,833 (34.50 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m8,833\u001b[0m (34.50 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Trainable params: 8,641 (33.75 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m8,641\u001b[0m (33.75 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Non-trainable params: 192 (768.00 B)\n", "\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m192\u001b[0m (768.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Fold 5 - Val Loss: 0.1719, Val Acc: 0.9532, Val AUC: 0.9869\n" ] } ], "source": [ "gkf = GroupKFold(n_splits=5) \n", "cv_histories = [] \n", "cv_results = [] \n", "fold_subjects = []\n", "\n", "for fold, (train_idx, val_idx) in enumerate(gkf.split(X, y, groups)):\n", " train_subjects = np.unique(groups[train_idx]) \n", " val_subjects = np.unique(groups[val_idx]) \n", " fold_subjects.append({\"Fold\": fold+1, \n", " \"Train_Subjects\": train_subjects, \n", " \"Val_Subjects\": val_subjects}) \n", " \n", " print(f\"\\n--- Fold {fold+1} ---\") \n", " print(\"Train-Subjects:\", train_subjects) \n", " print(\"Val-Subjects:\", val_subjects) \n", "\n", " #Split\n", " X_train, X_val = X[train_idx], X[val_idx] \n", " y_train, y_val = y[train_idx], y[val_idx] # Normalisierung pro Fold \n", "\n", " #Normalisierung pro Fold\n", " scaler = StandardScaler() \n", " X_train = scaler.fit_transform(X_train.reshape(len(X_train), -1)).reshape(X_train.shape) \n", " X_val = scaler.transform(X_val.reshape(len(X_val), -1)).reshape(X_val.shape) \n", "\n", " # Plausibilitäts-Check \n", " print(\"Train Mittelwerte (erste 5 Features):\", X_train.mean(axis=0)[:5]) \n", " print(\"Train Std (erste 5 Features):\", X_train.std(axis=0)[:5]) \n", " print(\"Val Mittelwerte (erste 5 Features):\", X_val.mean(axis=0)[:5]) \n", " print(\"Val Std (erste 5 Features):\", X_val.std(axis=0)[:5]) \n", "\n", " # Modell \n", " model = build_model(input_shape=(len(au_columns),1), lr=1e-4) \n", " model.summary() \n", "\n", " callbacks = [ \n", " tf.keras.callbacks.EarlyStopping(monitor=\"val_loss\", patience=10, restore_best_weights=True), \n", " tf.keras.callbacks.ReduceLROnPlateau(monitor=\"val_loss\", factor=0.5, patience=5, min_lr=1e-6) \n", " ] \n", "\n", " history = model.fit( \n", " X_train, y_train, \n", " validation_data=(X_val, y_val), \n", " epochs=100, \n", " batch_size=16, \n", " callbacks=callbacks, \n", " verbose=0 \n", " ) \n", "\n", " cv_histories.append(history.history) \n", " scores = model.evaluate(X_val, y_val, verbose=0) \n", " cv_results.append(scores) \n", " print(f\"Fold {fold+1} - Val Loss: {scores[0]:.4f}, Val Acc: {scores[1]:.4f}, Val AUC: {scores[2]:.4f}\")" ] }, { "cell_type": "markdown", "id": "d10b7e78", "metadata": {}, "source": [ "Results" ] }, { "cell_type": "code", "execution_count": 42, "id": "9aeba7f4", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "=== Cross-Validation Ergebnisse ===\n", "Durchschnittlicher Val-Loss: 0.2087\n", "Durchschnittliche Val-Accuracy: 0.9463\n", "Durchschnittliche Val-AUC: 0.9794\n", "\n", "=== Ergebnis-Tabelle ===\n", " Fold Val Loss Val Accuracy Val AUC\n", "0 1 0.233584 0.943534 0.975684\n", "1 2 0.208549 0.951427 0.980279\n", "2 3 0.203029 0.947784 0.979743\n", "3 4 0.226566 0.935601 0.974336\n", "4 5 0.171882 0.953248 0.986887\n", "5 Ø 0.208722 0.946319 0.979386\n", "Ergebnisse gespeichert als 'cnn_crossVal_results.csv'\n" ] } ], "source": [ "#results\n", "cv_results = np.array(cv_results) \n", "print(\"\\n=== Cross-Validation Ergebnisse ===\") \n", "print(f\"Durchschnittlicher Val-Loss: {cv_results[:,0].mean():.4f}\") \n", "print(f\"Durchschnittliche Val-Accuracy: {cv_results[:,1].mean():.4f}\") \n", "print(f\"Durchschnittliche Val-AUC: {cv_results[:,2].mean():.4f}\")\n", "\n", "#Ergebnis-Tabelle erstellen\n", "results_table = pd.DataFrame({ \n", " \"Fold\": np.arange(1, len(cv_results)+1), \n", " \"Val Loss\": cv_results[:,0], \n", " \"Val Accuracy\": cv_results[:,1], \n", " \"Val AUC\": cv_results[:,2] }) \n", "\n", "# Durchschnittszeile hinzufügen \n", "avg_row = pd.DataFrame({ \n", " \"Fold\": [\"Ø\"], \n", " \"Val Loss\": [cv_results[:,0].mean()], \n", " \"Val Accuracy\": [cv_results[:,1].mean()], \n", " \"Val AUC\": [cv_results[:,2].mean()] \n", "}) \n", "\n", "results_table = pd.concat([results_table, avg_row], ignore_index=True) \n", "\n", "print(\"\\n=== Ergebnis-Tabelle ===\") \n", "print(results_table) \n", "\n", "#Tabelle speichern \n", "results_table.to_csv(\"cnn_crossVal_results.csv\", index=False) \n", "print(\"Ergebnisse gespeichert als 'cnn_crossVal_results.csv'\")" ] }, { "cell_type": "markdown", "id": "d2f6de79", "metadata": {}, "source": [ "Finales Modell auf alle Daten trainieren" ] }, { "cell_type": "code", "execution_count": 44, "id": "6e403e5f", "metadata": {}, "outputs": [ { "data": { "text/html": [ "
Model: \"sequential_14\"\n",
"\n"
],
"text/plain": [
"\u001b[1mModel: \"sequential_14\"\u001b[0m\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃ Layer (type) ┃ Output Shape ┃ Param # ┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_28 (Conv1D) │ (None, 18, 32) │ 128 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_28 │ (None, 18, 32) │ 128 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_14 (MaxPooling1D) │ (None, 9, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_29 (Conv1D) │ (None, 7, 64) │ 6,208 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_29 │ (None, 7, 64) │ 256 │\n",
"│ (BatchNormalization) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_14 │ (None, 64) │ 0 │\n",
"│ (GlobalAveragePooling1D) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_28 (Dense) │ (None, 32) │ 2,080 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_14 (Dropout) │ (None, 32) │ 0 │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_29 (Dense) │ (None, 1) │ 33 │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n",
"\n"
],
"text/plain": [
"┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓\n",
"┃\u001b[1m \u001b[0m\u001b[1mLayer (type) \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1mOutput Shape \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m Param #\u001b[0m\u001b[1m \u001b[0m┃\n",
"┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩\n",
"│ conv1d_28 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_28 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m18\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m128\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ max_pooling1d_14 (\u001b[38;5;33mMaxPooling1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m9\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ conv1d_29 (\u001b[38;5;33mConv1D\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m6,208\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ batch_normalization_29 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m7\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m256\u001b[0m │\n",
"│ (\u001b[38;5;33mBatchNormalization\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ global_average_pooling1d_14 │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m64\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"│ (\u001b[38;5;33mGlobalAveragePooling1D\u001b[0m) │ │ │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_28 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m2,080\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dropout_14 (\u001b[38;5;33mDropout\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m32\u001b[0m) │ \u001b[38;5;34m0\u001b[0m │\n",
"├─────────────────────────────────┼────────────────────────┼───────────────┤\n",
"│ dense_29 (\u001b[38;5;33mDense\u001b[0m) │ (\u001b[38;5;45mNone\u001b[0m, \u001b[38;5;34m1\u001b[0m) │ \u001b[38;5;34m33\u001b[0m │\n",
"└─────────────────────────────────┴────────────────────────┴───────────────┘\n"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"Total params: 8,833 (34.50 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Total params: \u001b[0m\u001b[38;5;34m8,833\u001b[0m (34.50 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Trainable params: 8,641 (33.75 KB)\n", "\n" ], "text/plain": [ "\u001b[1m Trainable params: \u001b[0m\u001b[38;5;34m8,641\u001b[0m (33.75 KB)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ "
Non-trainable params: 192 (768.00 B)\n", "\n" ], "text/plain": [ "\u001b[1m Non-trainable params: \u001b[0m\u001b[38;5;34m192\u001b[0m (768.00 B)\n" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m5s\u001b[0m 4ms/step - accuracy: 0.6497 - auc: 0.7074 - loss: 0.7224\n", "Epoch 2/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.8300 - auc: 0.8909 - loss: 0.5339\n", "Epoch 3/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.8763 - auc: 0.9222 - loss: 0.4463\n", "Epoch 4/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 1ms/step - accuracy: 0.8935 - auc: 0.9344 - loss: 0.4032\n", "Epoch 5/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.8982 - auc: 0.9404 - loss: 0.3786\n", "Epoch 6/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9120 - auc: 0.9506 - loss: 0.3505\n", "Epoch 7/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9157 - auc: 0.9560 - loss: 0.3320\n", "Epoch 8/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9192 - auc: 0.9580 - loss: 0.3230\n", "Epoch 9/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9208 - auc: 0.9599 - loss: 0.3145\n", "Epoch 10/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9257 - auc: 0.9658 - loss: 0.2972\n", "Epoch 11/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9255 - auc: 0.9682 - loss: 0.2881\n", "Epoch 12/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9281 - auc: 0.9702 - loss: 0.2824\n", "Epoch 13/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9285 - auc: 0.9720 - loss: 0.2749\n", "Epoch 14/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9313 - auc: 0.9726 - loss: 0.2703\n", "Epoch 15/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9315 - auc: 0.9745 - loss: 0.2622\n", "Epoch 16/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9360 - auc: 0.9758 - loss: 0.2558\n", "Epoch 17/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9343 - auc: 0.9759 - loss: 0.2543\n", "Epoch 18/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9353 - auc: 0.9764 - loss: 0.2510\n", "Epoch 19/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9374 - auc: 0.9787 - loss: 0.2431\n", "Epoch 20/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9376 - auc: 0.9803 - loss: 0.2374\n", "Epoch 21/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9378 - auc: 0.9794 - loss: 0.2362\n", "Epoch 22/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9402 - auc: 0.9805 - loss: 0.2327\n", "Epoch 23/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9399 - auc: 0.9803 - loss: 0.2312\n", "Epoch 24/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9389 - auc: 0.9794 - loss: 0.2315\n", "Epoch 25/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9391 - auc: 0.9804 - loss: 0.2282\n", "Epoch 26/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9371 - auc: 0.9811 - loss: 0.2240\n", "Epoch 27/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9401 - auc: 0.9807 - loss: 0.2237\n", "Epoch 28/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9433 - auc: 0.9823 - loss: 0.2187\n", "Epoch 29/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9432 - auc: 0.9834 - loss: 0.2133\n", "Epoch 30/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9425 - auc: 0.9833 - loss: 0.2117\n", "Epoch 31/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9429 - auc: 0.9834 - loss: 0.2102\n", "Epoch 32/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9452 - auc: 0.9849 - loss: 0.2049\n", "Epoch 33/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9401 - auc: 0.9836 - loss: 0.2082\n", "Epoch 34/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9436 - auc: 0.9838 - loss: 0.2064\n", "Epoch 35/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9468 - auc: 0.9845 - loss: 0.2015\n", "Epoch 36/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9469 - auc: 0.9849 - loss: 0.1997\n", "Epoch 37/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9449 - auc: 0.9837 - loss: 0.2039\n", "Epoch 38/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9461 - auc: 0.9850 - loss: 0.1981\n", "Epoch 39/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9447 - auc: 0.9845 - loss: 0.1975\n", "Epoch 40/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9482 - auc: 0.9854 - loss: 0.1958\n", "Epoch 41/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9447 - auc: 0.9854 - loss: 0.1952\n", "Epoch 42/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9475 - auc: 0.9848 - loss: 0.1960\n", "Epoch 43/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9459 - auc: 0.9859 - loss: 0.1919\n", "Epoch 44/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9488 - auc: 0.9860 - loss: 0.1897\n", "Epoch 45/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9465 - auc: 0.9856 - loss: 0.1911\n", "Epoch 46/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9493 - auc: 0.9869 - loss: 0.1859\n", "Epoch 47/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9489 - auc: 0.9863 - loss: 0.1861\n", "Epoch 48/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9485 - auc: 0.9862 - loss: 0.1857\n", "Epoch 49/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9470 - auc: 0.9862 - loss: 0.1862\n", "Epoch 50/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9490 - auc: 0.9866 - loss: 0.1827\n", "Epoch 51/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9495 - auc: 0.9862 - loss: 0.1830\n", "Epoch 52/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9498 - auc: 0.9871 - loss: 0.1788\n", "Epoch 53/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9490 - auc: 0.9872 - loss: 0.1791\n", "Epoch 54/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9475 - auc: 0.9863 - loss: 0.1823\n", "Epoch 55/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9500 - auc: 0.9865 - loss: 0.1793\n", "Epoch 56/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9470 - auc: 0.9875 - loss: 0.1765\n", "Epoch 57/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9481 - auc: 0.9870 - loss: 0.1778\n", "Epoch 58/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9482 - auc: 0.9877 - loss: 0.1740\n", "Epoch 59/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9485 - auc: 0.9877 - loss: 0.1746\n", "Epoch 60/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9498 - auc: 0.9878 - loss: 0.1726\n", "Epoch 61/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9493 - auc: 0.9880 - loss: 0.1708\n", "Epoch 62/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9482 - auc: 0.9882 - loss: 0.1700\n", "Epoch 63/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9500 - auc: 0.9882 - loss: 0.1695\n", "Epoch 64/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9502 - auc: 0.9884 - loss: 0.1673\n", "Epoch 65/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9484 - auc: 0.9880 - loss: 0.1695\n", "Epoch 66/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9515 - auc: 0.9884 - loss: 0.1678\n", "Epoch 67/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9505 - auc: 0.9891 - loss: 0.1654\n", "Epoch 68/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9486 - auc: 0.9888 - loss: 0.1651\n", "Epoch 69/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9501 - auc: 0.9885 - loss: 0.1656\n", "Epoch 70/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9514 - auc: 0.9879 - loss: 0.1663\n", "Epoch 71/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9516 - auc: 0.9888 - loss: 0.1629\n", "Epoch 72/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9503 - auc: 0.9882 - loss: 0.1653\n", "Epoch 73/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9513 - auc: 0.9890 - loss: 0.1614\n", "Epoch 74/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9508 - auc: 0.9890 - loss: 0.1611\n", "Epoch 75/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9511 - auc: 0.9890 - loss: 0.1604\n", "Epoch 76/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9519 - auc: 0.9889 - loss: 0.1611\n", "Epoch 77/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9506 - auc: 0.9891 - loss: 0.1595\n", "Epoch 78/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9519 - auc: 0.9892 - loss: 0.1577\n", "Epoch 79/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9511 - auc: 0.9892 - loss: 0.1590\n", "Epoch 80/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9507 - auc: 0.9893 - loss: 0.1581\n", "Epoch 81/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9518 - auc: 0.9896 - loss: 0.1560\n", "Epoch 82/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9519 - auc: 0.9890 - loss: 0.1579\n", "Epoch 83/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9511 - auc: 0.9894 - loss: 0.1554\n", "Epoch 84/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9527 - auc: 0.9890 - loss: 0.1571\n", "Epoch 85/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9506 - auc: 0.9893 - loss: 0.1563\n", "Epoch 86/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9504 - auc: 0.9893 - loss: 0.1565\n", "Epoch 87/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9533 - auc: 0.9902 - loss: 0.1519\n", "Epoch 88/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9520 - auc: 0.9893 - loss: 0.1542\n", "Epoch 89/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9510 - auc: 0.9895 - loss: 0.1535\n", "Epoch 90/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9524 - auc: 0.9898 - loss: 0.1507\n", "Epoch 91/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9512 - auc: 0.9899 - loss: 0.1533\n", "Epoch 92/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9521 - auc: 0.9905 - loss: 0.1496\n", "Epoch 93/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9537 - auc: 0.9897 - loss: 0.1500\n", "Epoch 94/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9521 - auc: 0.9901 - loss: 0.1505\n", "Epoch 95/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9522 - auc: 0.9901 - loss: 0.1500\n", "Epoch 96/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9521 - auc: 0.9898 - loss: 0.1509\n", "Epoch 97/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9517 - auc: 0.9902 - loss: 0.1485\n", "Epoch 98/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9537 - auc: 0.9900 - loss: 0.1488\n", "Epoch 99/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9525 - auc: 0.9906 - loss: 0.1476\n", "Epoch 100/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9527 - auc: 0.9905 - loss: 0.1472\n", "Epoch 101/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9506 - auc: 0.9899 - loss: 0.1490\n", "Epoch 102/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9528 - auc: 0.9908 - loss: 0.1462\n", "Epoch 103/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9536 - auc: 0.9911 - loss: 0.1439\n", "Epoch 104/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9537 - auc: 0.9904 - loss: 0.1457\n", "Epoch 105/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9513 - auc: 0.9902 - loss: 0.1464\n", "Epoch 106/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9533 - auc: 0.9908 - loss: 0.1434\n", "Epoch 107/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9522 - auc: 0.9897 - loss: 0.1479\n", "Epoch 108/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9522 - auc: 0.9901 - loss: 0.1469\n", "Epoch 109/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9535 - auc: 0.9907 - loss: 0.1432\n", "Epoch 110/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9525 - auc: 0.9907 - loss: 0.1448\n", "Epoch 111/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9545 - auc: 0.9907 - loss: 0.1431\n", "Epoch 112/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9523 - auc: 0.9910 - loss: 0.1411\n", "Epoch 113/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9533 - auc: 0.9911 - loss: 0.1419\n", "Epoch 114/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9529 - auc: 0.9911 - loss: 0.1413\n", "Epoch 115/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9539 - auc: 0.9910 - loss: 0.1422\n", "Epoch 116/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9518 - auc: 0.9907 - loss: 0.1431\n", "Epoch 117/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9535 - auc: 0.9913 - loss: 0.1407\n", "Epoch 118/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9528 - auc: 0.9915 - loss: 0.1398\n", "Epoch 119/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9517 - auc: 0.9908 - loss: 0.1413\n", "Epoch 120/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9534 - auc: 0.9912 - loss: 0.1399\n", "Epoch 121/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9535 - auc: 0.9912 - loss: 0.1379\n", "Epoch 122/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9538 - auc: 0.9908 - loss: 0.1404\n", "Epoch 123/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9532 - auc: 0.9912 - loss: 0.1407\n", "Epoch 124/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9532 - auc: 0.9916 - loss: 0.1377\n", "Epoch 125/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9527 - auc: 0.9913 - loss: 0.1379\n", "Epoch 126/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9554 - auc: 0.9913 - loss: 0.1377\n", "Epoch 127/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9525 - auc: 0.9915 - loss: 0.1381\n", "Epoch 128/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9529 - auc: 0.9910 - loss: 0.1384\n", "Epoch 129/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9535 - auc: 0.9914 - loss: 0.1372\n", "Epoch 130/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9534 - auc: 0.9913 - loss: 0.1375\n", "Epoch 131/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9525 - auc: 0.9915 - loss: 0.1372\n", "Epoch 132/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9540 - auc: 0.9916 - loss: 0.1359\n", "Epoch 133/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9535 - auc: 0.9912 - loss: 0.1374\n", "Epoch 134/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9535 - auc: 0.9915 - loss: 0.1361\n", "Epoch 135/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9531 - auc: 0.9919 - loss: 0.1341\n", "Epoch 136/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9543 - auc: 0.9914 - loss: 0.1362\n", "Epoch 137/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9547 - auc: 0.9914 - loss: 0.1344\n", "Epoch 138/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9539 - auc: 0.9915 - loss: 0.1354\n", "Epoch 139/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9523 - auc: 0.9911 - loss: 0.1366\n", "Epoch 140/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9543 - auc: 0.9915 - loss: 0.1336\n", "Epoch 141/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9536 - auc: 0.9918 - loss: 0.1330\n", "Epoch 142/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9547 - auc: 0.9918 - loss: 0.1325\n", "Epoch 143/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9535 - auc: 0.9918 - loss: 0.1347\n", "Epoch 144/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9529 - auc: 0.9915 - loss: 0.1357\n", "Epoch 145/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9552 - auc: 0.9920 - loss: 0.1318\n", "Epoch 146/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9545 - auc: 0.9918 - loss: 0.1316\n", "Epoch 147/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9533 - auc: 0.9917 - loss: 0.1322\n", "Epoch 148/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9540 - auc: 0.9920 - loss: 0.1309\n", "Epoch 149/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9537 - auc: 0.9918 - loss: 0.1321\n", "Epoch 150/150\n", "\u001b[1m515/515\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 2ms/step - accuracy: 0.9539 - auc: 0.9919 - loss: 0.1319\n" ] }, { "data": { "text/plain": [ "