commit d1ce7b933f48188bc2e0cfdf5b6930b910114460 Author: Artur Feoktistov Date: Fri Apr 29 19:26:47 2022 +0200 init diff --git a/.ipynb_checkpoints/eval-checkpoint.ipynb b/.ipynb_checkpoints/eval-checkpoint.ipynb new file mode 100644 index 0000000..73e9d3f --- /dev/null +++ b/.ipynb_checkpoints/eval-checkpoint.ipynb @@ -0,0 +1,4691 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "5dba5871", + "metadata": {}, + "source": [ + "# In-Distribution = hem" + ] + }, + { + "cell_type": "markdown", + "id": "de7c031b", + "metadata": {}, + "source": [ + "## combined" + ] + }, + { + "cell_type": "code", + "execution_count": 129, + "id": "7c21eb48", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0152\t0.0130\t0.0105\t0.0135\n", + "weight_shi:\t-0.0578\t0.1014\t0.1178\t0.1034\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5853569764733287\n", + "CNMC 1.9901 +- 0.1085 q0: 1.7212 q10: 1.8609 q20: 1.9042 q30: 1.9291 q40: 1.9549 q50: 1.9800 q60: 2.0013 q70: 2.0351 q80: 2.0786 q90: 2.1427 q100: 2.3180\n", + "one_class_0 1.9561 +- 0.0829 q0: 1.6679 q10: 1.8555 q20: 1.8869 q30: 1.9128 q40: 1.9357 q50: 1.9564 q60: 1.9747 q70: 1.9978 q80: 2.0224 q90: 2.0600 q100: 2.2041\n", + "[one_class_0 CSI 0.5854] [one_class_0 best 0.5854] \n", + "[one_class_mean CSI 0.5854] [one_class_mean best 0.5854] \n", + "0.5854\t0.5854\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --distortion_scale 0.8 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 130, + "id": "846efb49", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0109\t0.0072\t0.0133\t0.0129\n", + "weight_shi:\t0.4840\t0.0844\t0.4048\t0.2004\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4842849583244716\n", + "CNMC 1.9963 +- 0.4334 q0: 0.3449 q10: 1.4686 q20: 1.6647 q30: 1.7749 q40: 1.8802 q50: 1.9851 q60: 2.0904 q70: 2.2032 q80: 2.3314 q90: 2.5160 q100: 3.5596\n", + "one_class_0 2.0168 +- 0.3659 q0: 0.5032 q10: 1.5638 q20: 1.7269 q30: 1.8222 q40: 1.9245 q50: 2.0083 q60: 2.0883 q70: 2.1776 q80: 2.3057 q90: 2.4967 q100: 3.3674\n", + "[one_class_0 CSI 0.4843] [one_class_0 best 0.4843] \n", + "[one_class_mean CSI 0.4843] [one_class_mean best 0.4843] \n", + "0.4843\t0.4843\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --sharpness_factor 128 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 131, + "id": "ebf2e296", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0019\t0.0039\t0.0042\t0.0047\n", + "weight_shi:\t0.0159\t0.3020\t1.0707\t0.5438\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.43598274238142987\n", + "CNMC 1.9968 +- 0.4243 q0: 1.0210 q10: 1.5387 q20: 1.6392 q30: 1.7221 q40: 1.7914 q50: 1.8964 q60: 2.0368 q70: 2.1923 q80: 2.3638 q90: 2.6239 q100: 3.6290\n", + "one_class_0 2.0836 +- 0.4325 q0: 1.0885 q10: 1.6040 q20: 1.7218 q30: 1.8127 q40: 1.9018 q50: 1.9885 q60: 2.1022 q70: 2.2500 q80: 2.4798 q90: 2.6977 q100: 3.7788\n", + "[one_class_0 CSI 0.4360] [one_class_0 best 0.4360] \n", + "[one_class_mean CSI 0.4360] [one_class_mean best 0.4360] \n", + "0.4360\t0.4360\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --distortion_scale 0.8 --resize_factor 0.08 --sharpness_factor 128 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_sharp_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 132, + "id": "a7b553d3", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0011\t0.0008\t0.0009\t0.0009\n", + "weight_shi:\t-0.0836\t0.1015\t0.0813\t0.0787\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5724992151024417\n", + "CNMC 2.0160 +- 0.0836 q0: 1.8554 q10: 1.9224 q20: 1.9466 q30: 1.9663 q40: 1.9854 q50: 2.0042 q60: 2.0232 q70: 2.0465 q80: 2.0759 q90: 2.1259 q100: 2.3440\n", + "one_class_0 1.9930 +- 0.0670 q0: 1.8047 q10: 1.9141 q20: 1.9399 q30: 1.9557 q40: 1.9704 q50: 1.9843 q60: 2.0012 q70: 2.0244 q80: 2.0475 q90: 2.0793 q100: 2.2942\n", + "[one_class_0 CSI 0.5725] [one_class_0 best 0.5725] \n", + "[one_class_mean CSI 0.5725] [one_class_mean best 0.5725] \n", + "0.5725\t0.5725\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --sharpness_factor 128 --distortion_scale 0.8 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_randpers_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_sharp_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "b5d5f05f", + "metadata": {}, + "source": [ + "## sharp" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "13c15d92", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0082\t0.0048\t0.0035\t0.0035\n", + "weight_shi:\t-0.0162\t0.0291\t0.0264\t0.0261\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.46516067612594825\n", + "CNMC 2.0611 +- 0.2843 q0: 1.4233 q10: 1.7048 q20: 1.8158 q30: 1.8910 q40: 1.9831 q50: 2.0498 q60: 2.1209 q70: 2.1990 q80: 2.3022 q90: 2.4508 q100: 3.0255\n", + "one_class_0 2.0896 +- 0.2109 q0: 1.5218 q10: 1.8407 q20: 1.9143 q30: 1.9720 q40: 2.0252 q50: 2.0691 q60: 2.1174 q70: 2.1761 q80: 2.2568 q90: 2.3717 q100: 2.9418\n", + "[one_class_0 CSI 0.4652] [one_class_0 best 0.4652] \n", + "[one_class_mean CSI 0.4652] [one_class_mean best 0.4652] \n", + "0.4652\t0.4652\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 4096\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 4096 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor4096.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "25951e79", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0095\t0.0075\t0.0068\t0.0072\n", + "weight_shi:\t-0.0480\t0.0769\t0.0704\t0.0693\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4025752235692077\n", + "CNMC 1.9780 +- 0.1552 q0: 1.6133 q10: 1.7905 q20: 1.8304 q30: 1.8774 q40: 1.9265 q50: 1.9698 q60: 2.0166 q70: 2.0610 q80: 2.1123 q90: 2.1776 q100: 2.5595\n", + "one_class_0 2.0255 +- 0.1272 q0: 1.6800 q10: 1.8659 q20: 1.9179 q30: 1.9585 q40: 1.9884 q50: 2.0210 q60: 2.0530 q70: 2.0845 q80: 2.1202 q90: 2.1844 q100: 2.7354\n", + "[one_class_0 CSI 0.4026] [one_class_0 best 0.4026] \n", + "[one_class_mean CSI 0.4026] [one_class_mean best 0.4026] \n", + "0.4026\t0.4026\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 2048\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 2048 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor2048.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 133, + "id": "4fc12b02", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0089\t0.0059\t0.0064\t0.0063\n", + "weight_shi:\t-0.0361\t0.0765\t0.0742\t0.0727\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4227749420188578\n", + "CNMC 2.0207 +- 0.1832 q0: 1.5627 q10: 1.7726 q20: 1.8618 q30: 1.9226 q40: 1.9811 q50: 2.0217 q60: 2.0627 q70: 2.1123 q80: 2.1832 q90: 2.2560 q100: 2.6861\n", + "one_class_0 2.0632 +- 0.1200 q0: 1.6644 q10: 1.9104 q20: 1.9645 q30: 2.0071 q40: 2.0386 q50: 2.0633 q60: 2.0887 q70: 2.1181 q80: 2.1534 q90: 2.2160 q100: 2.5742\n", + "[one_class_0 CSI 0.4228] [one_class_0 best 0.4228] \n", + "[one_class_mean CSI 0.4228] [one_class_mean best 0.4228] \n", + "0.4228\t0.4228\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 1024\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 1024 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor1024.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "99698eb6", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0067\t0.0032\t0.0035\t0.0038\n", + "weight_shi:\t-0.0499\t0.0682\t0.0675\t0.0722\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3898554522529092\n", + "CNMC 1.9663 +- 0.1069 q0: 1.6970 q10: 1.8437 q20: 1.8799 q30: 1.9043 q40: 1.9235 q50: 1.9537 q60: 1.9831 q70: 2.0145 q80: 2.0505 q90: 2.1087 q100: 2.5004\n", + "one_class_0 2.0038 +- 0.0970 q0: 1.7568 q10: 1.8902 q20: 1.9239 q30: 1.9445 q40: 1.9657 q50: 1.9897 q60: 2.0196 q70: 2.0483 q80: 2.0868 q90: 2.1398 q100: 2.3773\n", + "[one_class_0 CSI 0.3899] [one_class_0 best 0.3899] \n", + "[one_class_mean CSI 0.3899] [one_class_mean best 0.3899] \n", + "0.3899\t0.3899\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 512\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 512 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor512.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "01e6d61a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0053\t0.0084\t0.0092\t0.0087\n", + "weight_shi:\t0.4300\t0.0647\t0.0695\t0.0685\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5784846919656873\n", + "CNMC 2.1270 +- 0.9610 q0: -0.9326 q10: 0.9101 q20: 1.3880 q30: 1.6010 q40: 1.8639 q50: 2.1067 q60: 2.3374 q70: 2.5413 q80: 2.8893 q90: 3.3775 q100: 5.1585\n", + "one_class_0 1.8950 +- 0.7309 q0: -0.2104 q10: 1.0100 q20: 1.3020 q30: 1.4936 q40: 1.6684 q50: 1.8373 q60: 2.0139 q70: 2.2017 q80: 2.4870 q90: 2.8570 q100: 4.5441\n", + "[one_class_0 CSI 0.5785] [one_class_0 best 0.5785] \n", + "[one_class_mean CSI 0.5785] [one_class_mean best 0.5785] \n", + "0.5785\t0.5785\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 256\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 256 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor256.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "65cc4fcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0089\t0.0063\t0.0075\t0.0065\n", + "weight_shi:\t-0.0184\t0.0363\t0.0371\t0.0371\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3679688370350115\n", + "CNMC 1.9800 +- 0.0919 q0: 1.7207 q10: 1.8629 q20: 1.8911 q30: 1.9241 q40: 1.9548 q50: 1.9755 q60: 2.0034 q70: 2.0354 q80: 2.0631 q90: 2.1071 q100: 2.2242\n", + "one_class_0 2.0217 +- 0.0794 q0: 1.7727 q10: 1.9194 q20: 1.9543 q30: 1.9779 q40: 1.9999 q50: 2.0212 q60: 2.0423 q70: 2.0650 q80: 2.0906 q90: 2.1259 q100: 2.2548\n", + "[one_class_0 CSI 0.3680] [one_class_0 best 0.3680] \n", + "[one_class_mean CSI 0.3680] [one_class_mean best 0.3680] \n", + "0.3680\t0.3680\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 150\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 150 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor150.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "e13b48db", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0083\t0.0053\t0.0056\t0.0053\n", + "weight_shi:\t-0.1256\t0.0869\t0.0823\t0.0921\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4935733347512128\n", + "CNMC 2.0389 +- 0.1184 q0: 1.7300 q10: 1.8883 q20: 1.9358 q30: 1.9771 q40: 2.0079 q50: 2.0342 q60: 2.0657 q70: 2.0974 q80: 2.1469 q90: 2.1945 q100: 2.5086\n", + "one_class_0 2.0418 +- 0.0930 q0: 1.7624 q10: 1.9334 q20: 1.9610 q30: 1.9867 q40: 2.0125 q50: 2.0354 q60: 2.0608 q70: 2.0915 q80: 2.1163 q90: 2.1599 q100: 2.3964\n", + "[one_class_0 CSI 0.4936] [one_class_0 best 0.4936] \n", + "[one_class_mean CSI 0.4936] [one_class_mean best 0.4936] \n", + "0.4936\t0.4936\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 140\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 140 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor140.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "29cf690f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0045\t0.0043\t0.0070\t0.0053\n", + "weight_shi:\t-0.0813\t0.0676\t0.0710\t0.0626\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4419042880725954\n", + "CNMC 2.0129 +- 0.1195 q0: 1.5835 q10: 1.8643 q20: 1.9252 q30: 1.9581 q40: 1.9900 q50: 2.0157 q60: 2.0378 q70: 2.0653 q80: 2.1055 q90: 2.1542 q100: 2.4828\n", + "one_class_0 2.0362 +- 0.1010 q0: 1.6891 q10: 1.9098 q20: 1.9486 q30: 1.9817 q40: 2.0087 q50: 2.0351 q60: 2.0576 q70: 2.0853 q80: 2.1206 q90: 2.1651 q100: 2.3927\n", + "[one_class_0 CSI 0.4419] [one_class_0 best 0.4419] \n", + "[one_class_mean CSI 0.4419] [one_class_mean best 0.4419] \n", + "0.4419\t0.4419\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 130\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 130 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor130.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "dfaa2119", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0130\t0.0032\t0.0032\t0.0028\n", + "weight_shi:\t-0.0796\t0.1288\t0.1192\t0.1286\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.47003337080586194\n", + "CNMC 1.9969 +- 0.1287 q0: 1.5279 q10: 1.8373 q20: 1.9073 q30: 1.9387 q40: 1.9663 q50: 1.9928 q60: 2.0270 q70: 2.0640 q80: 2.0993 q90: 2.1517 q100: 2.4161\n", + "one_class_0 2.0110 +- 0.1133 q0: 1.6407 q10: 1.8709 q20: 1.9143 q30: 1.9536 q40: 1.9857 q50: 2.0121 q60: 2.0374 q70: 2.0675 q80: 2.1043 q90: 2.1554 q100: 2.3778\n", + "[one_class_0 CSI 0.4700] [one_class_0 best 0.4700] \n", + "[one_class_mean CSI 0.4700] [one_class_mean best 0.4700] \n", + "0.4700\t0.4700\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 120\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 120 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor120.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "e3eecf30", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0091\t0.0036\t0.0042\t0.0040\n", + "weight_shi:\t0.2410\t0.5432\t0.2487\t0.3103\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5940902277722075\n", + "CNMC 2.0988 +- 0.5314 q0: 1.0497 q10: 1.4954 q20: 1.6747 q30: 1.8162 q40: 1.9078 q50: 2.0314 q60: 2.1299 q70: 2.2698 q80: 2.4594 q90: 2.8458 q100: 4.3420\n", + "one_class_0 1.9324 +- 0.3714 q0: 1.0642 q10: 1.5287 q20: 1.6460 q30: 1.7249 q40: 1.8095 q50: 1.8731 q60: 1.9535 q70: 2.0458 q80: 2.1797 q90: 2.4138 q100: 3.6869\n", + "[one_class_0 CSI 0.5941] [one_class_0 best 0.5941] \n", + "[one_class_mean CSI 0.5941] [one_class_mean best 0.5941] \n", + "0.5941\t0.5941\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 128\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 128 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor128.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "d7d86bff", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0077\t0.0039\t0.0057\t0.0045\n", + "weight_shi:\t-0.0543\t0.1223\t0.1116\t0.1079\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4230414273995078\n", + "CNMC 1.9898 +- 0.1931 q0: 1.3874 q10: 1.7331 q20: 1.8325 q30: 1.9150 q40: 1.9682 q50: 2.0080 q60: 2.0511 q70: 2.1020 q80: 2.1509 q90: 2.2176 q100: 2.4975\n", + "one_class_0 2.0442 +- 0.1594 q0: 1.5034 q10: 1.8378 q20: 1.9120 q30: 1.9673 q40: 2.0118 q50: 2.0508 q60: 2.0920 q70: 2.1314 q80: 2.1747 q90: 2.2473 q100: 2.5530\n", + "[one_class_0 CSI 0.4230] [one_class_0 best 0.4230] \n", + "[one_class_mean CSI 0.4230] [one_class_mean best 0.4230] \n", + "0.4230\t0.4230\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 100\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 100 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor100.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "d60476b1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0071\t0.0035\t0.0055\t0.0033\n", + "weight_shi:\t-0.7731\t-0.4426\t3.0750\t-1.0296\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3861885880958892\n", + "CNMC 1.8504 +- 1.7281 q0: -3.9915 q10: -0.3957 q20: 0.3727 q30: 1.0483 q40: 1.6967 q50: 2.0760 q60: 2.4763 q70: 2.8382 q80: 3.3079 q90: 3.8465 q100: 5.6520\n", + "one_class_0 2.5429 +- 1.3399 q0: -4.5019 q10: 0.9296 q20: 1.4679 q30: 1.9042 q40: 2.2539 q50: 2.5979 q60: 2.9289 q70: 3.3053 q80: 3.6585 q90: 4.1959 q100: 6.6848\n", + "[one_class_0 CSI 0.3862] [one_class_0 best 0.3862] \n", + "[one_class_mean CSI 0.3862] [one_class_mean best 0.3862] \n", + "0.3862\t0.3862\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 80\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 80 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor80.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "b367669a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0083\t0.0112\t0.0076\t0.0136\n", + "weight_shi:\t-0.0567\t0.1140\t0.0842\t0.1028\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.376367240907848\n", + "CNMC 1.9968 +- 0.0768 q0: 1.7939 q10: 1.9005 q20: 1.9341 q30: 1.9569 q40: 1.9751 q50: 1.9937 q60: 2.0157 q70: 2.0367 q80: 2.0629 q90: 2.0964 q100: 2.2761\n", + "one_class_0 2.0289 +- 0.0677 q0: 1.8223 q10: 1.9439 q20: 1.9701 q30: 1.9928 q40: 2.0111 q50: 2.0279 q60: 2.0448 q70: 2.0625 q80: 2.0815 q90: 2.1167 q100: 2.3343\n", + "[one_class_0 CSI 0.3764] [one_class_0 best 0.3764] \n", + "[one_class_mean CSI 0.3764] [one_class_mean best 0.3764] \n", + "0.3764\t0.3764\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 64\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 64 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor64.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "dce638a8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0150\t0.0058\t0.0129\t0.0054\n", + "weight_shi:\t-0.0982\t0.1165\t0.1059\t0.0929\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4102051874132815\n", + "CNMC 1.9836 +- 0.2025 q0: 1.4858 q10: 1.7023 q20: 1.8012 q30: 1.8770 q40: 1.9428 q50: 2.0013 q60: 2.0495 q70: 2.0936 q80: 2.1462 q90: 2.2425 q100: 2.5144\n", + "one_class_0 2.0499 +- 0.1863 q0: 1.5414 q10: 1.8119 q20: 1.8950 q30: 1.9488 q40: 1.9984 q50: 2.0487 q60: 2.0962 q70: 2.1512 q80: 2.2168 q90: 2.2846 q100: 2.6101\n", + "[one_class_0 CSI 0.4102] [one_class_0 best 0.4102] \n", + "[one_class_mean CSI 0.4102] [one_class_mean best 0.4102] \n", + "0.4102\t0.4102\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 32\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 32 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor32.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "28387a64", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0088\t0.0070\t0.0079\t0.0070\n", + "weight_shi:\t-0.0517\t0.1752\t0.1985\t0.2796\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4567964532758079\n", + "CNMC 2.0252 +- 0.2349 q0: 1.3683 q10: 1.7184 q20: 1.8035 q30: 1.8906 q40: 1.9540 q50: 2.0153 q60: 2.0821 q70: 2.1619 q80: 2.2506 q90: 2.3384 q100: 2.6113\n", + "one_class_0 2.0617 +- 0.2072 q0: 1.5917 q10: 1.7983 q20: 1.8819 q30: 1.9423 q40: 1.9904 q50: 2.0477 q60: 2.1076 q70: 2.1646 q80: 2.2533 q90: 2.3431 q100: 2.6298\n", + "[one_class_0 CSI 0.4568] [one_class_0 best 0.4568] \n", + "[one_class_mean CSI 0.4568] [one_class_mean best 0.4568] \n", + "0.4568\t0.4568\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 16\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 16 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor16.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "424cd4b8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0167\t0.0112\t0.0119\t0.0098\n", + "weight_shi:\t-0.1065\t0.1467\t0.1401\t0.1203\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4809307872269316\n", + "CNMC 2.0080 +- 0.2589 q0: 1.4062 q10: 1.6813 q20: 1.7879 q30: 1.8753 q40: 1.9309 q50: 1.9964 q60: 2.0601 q70: 2.1217 q80: 2.2165 q90: 2.3636 q100: 3.0258\n", + "one_class_0 2.0288 +- 0.2475 q0: 1.4597 q10: 1.7282 q20: 1.8162 q30: 1.8799 q40: 1.9422 q50: 1.9987 q60: 2.0671 q70: 2.1423 q80: 2.2336 q90: 2.3568 q100: 3.5008\n", + "[one_class_0 CSI 0.4809] [one_class_0 best 0.4809] \n", + "[one_class_mean CSI 0.4809] [one_class_mean best 0.4809] \n", + "0.4809\t0.4809\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 8 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor8.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "b30452ce", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0033\t0.0030\t0.0025\t0.0028\n", + "weight_shi:\t-0.0191\t0.0502\t0.0455\t0.0473\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4414099292073041\n", + "CNMC 1.9717 +- 0.1514 q0: 1.4512 q10: 1.7885 q20: 1.8443 q30: 1.8923 q40: 1.9275 q50: 1.9674 q60: 2.0006 q70: 2.0525 q80: 2.0992 q90: 2.1705 q100: 2.4112\n", + "one_class_0 1.9985 +- 0.1198 q0: 1.6117 q10: 1.8478 q20: 1.9013 q30: 1.9366 q40: 1.9671 q50: 1.9967 q60: 2.0257 q70: 2.0616 q80: 2.0994 q90: 2.1570 q100: 2.3699\n", + "[one_class_0 CSI 0.4414] [one_class_0 best 0.4414] \n", + "[one_class_mean CSI 0.4414] [one_class_mean best 0.4414] \n", + "0.4414\t0.4414\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 5\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 5 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor5.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "id": "61511c88", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0048\t0.0048\t0.0033\t0.0040\n", + "weight_shi:\t-0.0216\t0.0573\t0.0466\t0.0474\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3330541883146477\n", + "CNMC 1.9613 +- 0.1613 q0: 1.4001 q10: 1.7205 q20: 1.8317 q30: 1.8884 q40: 1.9420 q50: 1.9740 q60: 2.0204 q70: 2.0591 q80: 2.1023 q90: 2.1507 q100: 2.3497\n", + "one_class_0 2.0506 +- 0.1298 q0: 1.5709 q10: 1.8729 q20: 1.9474 q30: 2.0023 q40: 2.0388 q50: 2.0668 q60: 2.0970 q70: 2.1272 q80: 2.1610 q90: 2.1981 q100: 2.4918\n", + "[one_class_0 CSI 0.3331] [one_class_0 best 0.3331] \n", + "[one_class_mean CSI 0.3331] [one_class_mean best 0.3331] \n", + "0.3331\t0.3331\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 4\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 4 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor4.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "id": "9aa87298", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0025\t0.0027\t0.0023\t0.0026\n", + "weight_shi:\t-0.0247\t0.0743\t0.0809\t0.0786\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.32097499468295204\n", + "CNMC 1.9756 +- 0.0921 q0: 1.6810 q10: 1.8631 q20: 1.9010 q30: 1.9281 q40: 1.9493 q50: 1.9693 q60: 1.9932 q70: 2.0205 q80: 2.0561 q90: 2.0987 q100: 2.2881\n", + "one_class_0 2.0291 +- 0.0715 q0: 1.8113 q10: 1.9469 q20: 1.9690 q30: 1.9889 q40: 2.0060 q50: 2.0221 q60: 2.0372 q70: 2.0598 q80: 2.0847 q90: 2.1253 q100: 2.3137\n", + "[one_class_0 CSI 0.3210] [one_class_0 best 0.3210] \n", + "[one_class_mean CSI 0.3210] [one_class_mean best 0.3210] \n", + "0.3210\t0.3210\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor3.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "id": "ed261f4c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0016\t0.0017\t0.0018\t0.0018\n", + "weight_shi:\t-0.0191\t0.0634\t0.0692\t0.0681\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5479179959286604\n", + "CNMC 2.0439 +- 0.2491 q0: 1.3671 q10: 1.7149 q20: 1.8289 q30: 1.9114 q40: 1.9969 q50: 2.0563 q60: 2.1157 q70: 2.1758 q80: 2.2482 q90: 2.3551 q100: 2.6738\n", + "one_class_0 2.0114 +- 0.1906 q0: 1.4681 q10: 1.7626 q20: 1.8467 q30: 1.9083 q40: 1.9608 q50: 2.0043 q60: 2.0576 q70: 2.1069 q80: 2.1710 q90: 2.2558 q100: 2.5480\n", + "[one_class_0 CSI 0.5479] [one_class_0 best 0.5479] \n", + "[one_class_mean CSI 0.5479] [one_class_mean best 0.5479] \n", + "0.5479\t0.5479\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor2.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "3f347111", + "metadata": {}, + "source": [ + "## randpers" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "id": "6954e9f3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0027\t0.0028\t0.0028\t0.0029\n", + "weight_shi:\t0.0396\t-0.1267\t-0.1178\t-0.1344\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.35701318627897793\n", + "CNMC 1.9933 +- 0.0426 q0: 1.7637 q10: 1.9470 q20: 1.9683 q30: 1.9825 q40: 1.9896 q50: 1.9981 q60: 2.0059 q70: 2.0119 q80: 2.0228 q90: 2.0391 q100: 2.1039\n", + "one_class_0 2.0107 +- 0.0300 q0: 1.8398 q10: 1.9753 q20: 1.9909 q30: 2.0006 q40: 2.0073 q50: 2.0134 q60: 2.0197 q70: 2.0245 q80: 2.0323 q90: 2.0429 q100: 2.0991\n", + "[one_class_0 CSI 0.3570] [one_class_0 best 0.3570] \n", + "[one_class_mean CSI 0.3570] [one_class_mean best 0.3570] \n", + "0.3570\t0.3570\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.95\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.95 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.95_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "7ef390e9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0079\t0.0098\t0.0115\t0.0104\n", + "weight_shi:\t-0.2285\t-6.8399\t0.4918\t0.3229\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5641122049038374\n", + "CNMC 1.9712 +- 0.8313 q0: -4.4253 q10: 0.9802 q20: 1.5045 q30: 1.8002 q40: 1.9917 q50: 2.1173 q60: 2.2353 q70: 2.3608 q80: 2.5048 q90: 2.7707 q100: 4.2407\n", + "one_class_0 1.9180 +- 0.6218 q0: -3.1624 q10: 1.2584 q20: 1.5541 q30: 1.7231 q40: 1.8312 q50: 1.9474 q60: 2.0646 q70: 2.2017 q80: 2.3455 q90: 2.6130 q100: 4.2616\n", + "[one_class_0 CSI 0.5641] [one_class_0 best 0.5641] \n", + "[one_class_mean CSI 0.5641] [one_class_mean best 0.5641] \n", + "0.5641\t0.5641\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.9\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.9 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.9_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "1205e882", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0034\t0.0047\t0.0029\t0.0041\n", + "weight_shi:\t0.1303\t-0.3875\t-0.1777\t-0.3820\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.34714879632161555\n", + "CNMC 1.9226 +- 0.3041 q0: 0.5031 q10: 1.4958 q20: 1.7161 q30: 1.8443 q40: 1.9245 q50: 1.9879 q60: 2.0373 q70: 2.0889 q80: 2.1566 q90: 2.2287 q100: 2.5521\n", + "one_class_0 2.0685 +- 0.2043 q0: 1.1682 q10: 1.8203 q20: 1.9473 q30: 2.0089 q40: 2.0513 q50: 2.0872 q60: 2.1284 q70: 2.1701 q80: 2.2162 q90: 2.2834 q100: 2.6224\n", + "[one_class_0 CSI 0.3471] [one_class_0 best 0.3471] \n", + "[one_class_mean CSI 0.3471] [one_class_mean best 0.3471] \n", + "0.3471\t0.3471\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.85\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.85 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.85_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": "8887546c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0020\t0.0037\t0.0026\t0.0039\n", + "weight_shi:\t0.1393\t2.5299\t-1.4218\t1.2437\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6913884078226435\n", + "CNMC 2.0752 +- 0.3123 q0: 0.7320 q10: 1.7083 q20: 1.8297 q30: 1.9220 q40: 2.0047 q50: 2.0690 q60: 2.1391 q70: 2.2162 q80: 2.3010 q90: 2.4549 q100: 3.2842\n", + "one_class_0 1.8917 +- 0.2289 q0: 0.7422 q10: 1.6150 q20: 1.7197 q30: 1.7818 q40: 1.8380 q50: 1.8923 q60: 1.9400 q70: 1.9926 q80: 2.0616 q90: 2.1731 q100: 2.9070\n", + "[one_class_0 CSI 0.6914] [one_class_0 best 0.6914] \n", + "[one_class_mean CSI 0.6914] [one_class_mean best 0.6914] \n", + "0.6914\t0.6914\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.8 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.8_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "b65d2295", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0080\t0.0036\t0.0038\t0.0054\n", + "weight_shi:\t-0.0669\t-0.5647\t-0.7888\t0.5885\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.35124039133473095\n", + "CNMC 1.9592 +- 0.1769 q0: 1.1540 q10: 1.7506 q20: 1.8287 q30: 1.8809 q40: 1.9227 q50: 1.9622 q60: 1.9930 q70: 2.0505 q80: 2.0959 q90: 2.1715 q100: 2.7656\n", + "one_class_0 2.0376 +- 0.1423 q0: 1.0198 q10: 1.8838 q20: 1.9409 q30: 1.9806 q40: 2.0144 q50: 2.0407 q60: 2.0708 q70: 2.0989 q80: 2.1348 q90: 2.1967 q100: 2.6480\n", + "[one_class_0 CSI 0.3512] [one_class_0 best 0.3512] \n", + "[one_class_mean CSI 0.3512] [one_class_mean best 0.3512] \n", + "0.3512\t0.3512\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.75 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.75_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "id": "2a818378", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0034\t0.0037\t0.0024\t0.0028\n", + "weight_shi:\t0.5181\t-2.5612\t-0.2828\t-0.4473\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3594818156959256\n", + "CNMC 1.5977 +- 1.3081 q0: -2.9399 q10: -0.2131 q20: 0.5889 q30: 1.0500 q40: 1.4828 q50: 1.7567 q60: 2.0337 q70: 2.3642 q80: 2.6741 q90: 3.1919 q100: 4.5132\n", + "one_class_0 2.2261 +- 1.0824 q0: -1.7685 q10: 0.7646 q20: 1.4013 q30: 1.7643 q40: 2.0621 q50: 2.3193 q60: 2.5929 q70: 2.8838 q80: 3.1160 q90: 3.5429 q100: 5.0474\n", + "[one_class_0 CSI 0.3595] [one_class_0 best 0.3595] \n", + "[one_class_mean CSI 0.3595] [one_class_mean best 0.3595] \n", + "0.3595\t0.3595\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.6\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.6 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.6_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "id": "09a15dda", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0043\t0.0115\t0.0075\t0.0087\n", + "weight_shi:\t12.1609\t0.3968\t2.0101\t0.4812\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.36039204367068733\n", + "CNMC 1.4531 +- 7.5510 q0: -28.7074 q10: -8.1564 q20: -3.7419 q30: -1.3692 q40: 0.5754 q50: 2.0721 q60: 3.7007 q70: 5.1638 q80: 7.3805 q90: 10.4019 q100: 20.0002\n", + "one_class_0 4.8084 +- 5.1144 q0: -14.2655 q10: -1.2285 q20: 1.1701 q30: 2.5734 q40: 3.7262 q50: 4.8972 q60: 5.9430 q70: 7.0902 q80: 8.7584 q90: 11.3302 q100: 19.8412\n", + "[one_class_0 CSI 0.3604] [one_class_0 best 0.3604] \n", + "[one_class_mean CSI 0.3604] [one_class_mean best 0.3604] \n", + "0.3604\t0.3604\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.3_one_class_1/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "47013663", + "metadata": {}, + "source": [ + "## blur" + ] + }, + { + "cell_type": "code", + "execution_count": 134, + "id": "958ecba3", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0038\t0.0072\t0.0039\t0.0044\n", + "weight_shi:\t-0.1658\t0.1714\t0.2799\t0.2838\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4812004375170905\n", + "CNMC 1.9384 +- 0.2410 q0: 1.5304 q10: 1.7013 q20: 1.7508 q30: 1.7894 q40: 1.8299 q50: 1.8827 q60: 1.9243 q70: 1.9999 q80: 2.0841 q90: 2.2637 q100: 2.8485\n", + "one_class_0 1.9219 +- 0.1651 q0: 1.5451 q10: 1.7386 q20: 1.7816 q30: 1.8174 q40: 1.8548 q50: 1.8945 q60: 1.9407 q70: 1.9846 q80: 2.0549 q90: 2.1446 q100: 2.6371\n", + "[one_class_0 CSI 0.4812] [one_class_0 best 0.4812] \n", + "[one_class_mean CSI 0.4812] [one_class_mean best 0.4812] \n", + "0.4812\t0.4812\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 180\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 180 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma180.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 135, + "id": "a3f7ef72", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0025\t0.0058\t0.0024\t0.0029\n", + "weight_shi:\t-0.0568\t0.0831\t0.1701\t0.1303\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6192537902956279\n", + "CNMC 2.0247 +- 0.0991 q0: 1.8346 q10: 1.9155 q20: 1.9404 q30: 1.9652 q40: 1.9879 q50: 2.0067 q60: 2.0291 q70: 2.0625 q80: 2.1019 q90: 2.1563 q100: 2.4786\n", + "one_class_0 1.9853 +- 0.0765 q0: 1.7917 q10: 1.9064 q20: 1.9276 q30: 1.9429 q40: 1.9598 q50: 1.9743 q60: 1.9887 q70: 2.0055 q80: 2.0343 q90: 2.0845 q100: 2.3701\n", + "[one_class_0 CSI 0.6193] [one_class_0 best 0.6193] \n", + "[one_class_mean CSI 0.6193] [one_class_mean best 0.6193] \n", + "0.6193\t0.6193\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 120\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 120 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma120.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 147, + "id": "2f2a8808", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0030\t0.0043\t0.0026\t0.0028\n", + "weight_shi:\t-0.0889\t0.1756\t0.3138\t0.2610\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5952460527248604\n", + "CNMC 2.0008 +- 0.0139 q0: 1.9745 q10: 1.9866 q20: 1.9900 q30: 1.9930 q40: 1.9955 q50: 1.9986 q60: 2.0019 q70: 2.0048 q80: 2.0099 q90: 2.0166 q100: 2.0896\n", + "one_class_0 1.9964 +- 0.0119 q0: 1.9575 q10: 1.9833 q20: 1.9872 q30: 1.9899 q40: 1.9925 q50: 1.9948 q60: 1.9973 q70: 2.0007 q80: 2.0051 q90: 2.0119 q100: 2.0732\n", + "[one_class_0 CSI 0.5952] [one_class_0 best 0.5952] \n", + "[one_class_mean CSI 0.5952] [one_class_mean best 0.5952] \n", + "0.5952\t0.5952\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 110\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 110 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma110.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 136, + "id": "08a6959c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0067\t0.0043\t0.0047\t0.0049\n", + "weight_shi:\t-0.0583\t0.0915\t0.2051\t0.1700\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.633952896018797\n", + "CNMC 1.9904 +- 0.1692 q0: 1.5494 q10: 1.7849 q20: 1.8442 q30: 1.9005 q40: 1.9430 q50: 1.9786 q60: 2.0203 q70: 2.0602 q80: 2.1209 q90: 2.2064 q100: 2.5511\n", + "one_class_0 1.9167 +- 0.1098 q0: 1.5787 q10: 1.7796 q20: 1.8329 q30: 1.8646 q40: 1.8922 q50: 1.9150 q60: 1.9402 q70: 1.9662 q80: 1.9991 q90: 2.0546 q100: 2.2965\n", + "[one_class_0 CSI 0.6340] [one_class_0 best 0.6340] \n", + "[one_class_mean CSI 0.6340] [one_class_mean best 0.6340] \n", + "0.6340\t0.6340\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 105\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 105 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma105.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 137, + "id": "a4a4eee1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0091\t0.0045\t0.0091\t0.0070\n", + "weight_shi:\t-0.0676\t0.0975\t0.1849\t0.1972\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5476483456385015\n", + "CNMC 1.9769 +- 0.2661 q0: 1.3171 q10: 1.6312 q20: 1.7272 q30: 1.8319 q40: 1.9118 q50: 1.9776 q60: 2.0460 q70: 2.1125 q80: 2.1924 q90: 2.3278 q100: 2.8173\n", + "one_class_0 1.9268 +- 0.2241 q0: 1.2928 q10: 1.6165 q20: 1.7138 q30: 1.8152 q40: 1.8859 q50: 1.9465 q60: 2.0048 q70: 2.0580 q80: 2.1179 q90: 2.1973 q100: 2.5704\n", + "[one_class_0 CSI 0.5476] [one_class_0 best 0.5476] \n", + "[one_class_mean CSI 0.5476] [one_class_mean best 0.5476] \n", + "0.5476\t0.5476\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 100\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 100 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma100.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 138, + "id": "8f0ceb15", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0018\t0.0028\t0.0016\t0.0018\n", + "weight_shi:\t-0.2029\t0.1970\t1.0597\t0.4185\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5393334953767002\n", + "CNMC 1.9749 +- 0.3046 q0: 1.1986 q10: 1.5960 q20: 1.7089 q30: 1.8060 q40: 1.8905 q50: 1.9696 q60: 2.0356 q70: 2.1233 q80: 2.2217 q90: 2.3752 q100: 3.1061\n", + "one_class_0 1.9275 +- 0.2387 q0: 1.1897 q10: 1.6222 q20: 1.7266 q30: 1.8084 q40: 1.8732 q50: 1.9343 q60: 1.9958 q70: 2.0464 q80: 2.1172 q90: 2.2327 q100: 2.6893\n", + "[one_class_0 CSI 0.5393] [one_class_0 best 0.5393] \n", + "[one_class_mean CSI 0.5393] [one_class_mean best 0.5393] \n", + "0.5393\t0.5393\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 95\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 95 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma95.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 139, + "id": "7d89e279", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0075\t0.0041\t0.0071\t0.0059\n", + "weight_shi:\t-0.0360\t0.0714\t0.1079\t0.0991\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5292488277175179\n", + "CNMC 1.9774 +- 0.1920 q0: 1.5068 q10: 1.7357 q20: 1.8106 q30: 1.8745 q40: 1.9206 q50: 1.9753 q60: 2.0197 q70: 2.0694 q80: 2.1299 q90: 2.2192 q100: 2.6272\n", + "one_class_0 1.9507 +- 0.1545 q0: 1.4789 q10: 1.7436 q20: 1.8103 q30: 1.8750 q40: 1.9239 q50: 1.9683 q60: 2.0050 q70: 2.0411 q80: 2.0780 q90: 2.1379 q100: 2.3968\n", + "[one_class_0 CSI 0.5292] [one_class_0 best 0.5292] \n", + "[one_class_mean CSI 0.5292] [one_class_mean best 0.5292] \n", + "0.5292\t0.5292\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 90\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 90 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma90.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 140, + "id": "ebb47e6b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0050\t0.0117\t0.0038\t0.0049\n", + "weight_shi:\t-0.2427\t0.2328\t1.3692\t0.7248\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.557083573866456\n", + "CNMC 2.0005 +- 0.1926 q0: 1.3343 q10: 1.8135 q20: 1.8631 q30: 1.9060 q40: 1.9410 q50: 1.9699 q60: 2.0110 q70: 2.0589 q80: 2.1204 q90: 2.2186 q100: 3.1284\n", + "one_class_0 1.9634 +- 0.1487 q0: 1.4064 q10: 1.8025 q20: 1.8544 q30: 1.8886 q40: 1.9192 q50: 1.9463 q60: 1.9749 q70: 2.0096 q80: 2.0594 q90: 2.1522 q100: 2.5877\n", + "[one_class_0 CSI 0.5571] [one_class_0 best 0.5571] \n", + "[one_class_mean CSI 0.5571] [one_class_mean best 0.5571] \n", + "0.5571\t0.5571\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 80\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 80 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma80.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 141, + "id": "7d6e0050", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0062\t0.0053\t0.0066\t0.0062\n", + "weight_shi:\t-0.0434\t0.0771\t0.1221\t0.1065\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5821104122990916\n", + "CNMC 1.9984 +- 0.0869 q0: 1.7841 q10: 1.8832 q20: 1.9216 q30: 1.9505 q40: 1.9768 q50: 1.9991 q60: 2.0230 q70: 2.0443 q80: 2.0710 q90: 2.1126 q100: 2.2334\n", + "one_class_0 1.9740 +- 0.0685 q0: 1.7594 q10: 1.8780 q20: 1.9143 q30: 1.9428 q40: 1.9641 q50: 1.9808 q60: 1.9973 q70: 2.0131 q80: 2.0305 q90: 2.0551 q100: 2.1770\n", + "[one_class_0 CSI 0.5821] [one_class_0 best 0.5821] \n", + "[one_class_mean CSI 0.5821] [one_class_mean best 0.5821] \n", + "0.5821\t0.5821\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 60\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 60 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma60.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 142, + "id": "df7becce", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0045\t0.0051\t0.0033\t0.0041\n", + "weight_shi:\t-0.1512\t0.2745\t0.6510\t0.4026\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6940311072625811\n", + "CNMC 2.0096 +- 0.0626 q0: 1.8488 q10: 1.9389 q20: 1.9597 q30: 1.9741 q40: 1.9884 q50: 2.0031 q60: 2.0185 q70: 2.0342 q80: 2.0520 q90: 2.0853 q100: 2.2770\n", + "one_class_0 1.9718 +- 0.0446 q0: 1.8450 q10: 1.9219 q20: 1.9383 q30: 1.9495 q40: 1.9584 q50: 1.9680 q60: 1.9769 q70: 1.9880 q80: 2.0015 q90: 2.0244 q100: 2.2023\n", + "[one_class_0 CSI 0.6940] [one_class_0 best 0.6940] \n", + "[one_class_mean CSI 0.6940] [one_class_mean best 0.6940] \n", + "0.6940\t0.6940\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma40.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 143, + "id": "b7036b42", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0017\t0.0020\t0.0015\t0.0016\n", + "weight_shi:\t0.0317\t-0.1164\t-0.0840\t-0.0812\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.39877986408612603\n", + "CNMC 1.9759 +- 0.1316 q0: 1.4471 q10: 1.8010 q20: 1.8983 q30: 1.9382 q40: 1.9698 q50: 1.9932 q60: 2.0241 q70: 2.0528 q80: 2.0799 q90: 2.1197 q100: 2.2278\n", + "one_class_0 2.0210 +- 0.0942 q0: 1.5614 q10: 1.8968 q20: 1.9555 q30: 1.9874 q40: 2.0148 q50: 2.0364 q60: 2.0551 q70: 2.0753 q80: 2.0963 q90: 2.1246 q100: 2.2320\n", + "[one_class_0 CSI 0.3988] [one_class_0 best 0.3988] \n", + "[one_class_mean CSI 0.3988] [one_class_mean best 0.3988] \n", + "0.3988\t0.3988\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 20\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 20 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma20.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 144, + "id": "e7b68654", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0020\t0.0035\t0.0027\t0.0027\n", + "weight_shi:\t0.1013\t-0.5641\t-0.5419\t-0.3880\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.33394542683235595\n", + "CNMC 1.9739 +- 0.2803 q0: 1.2318 q10: 1.5933 q20: 1.7359 q30: 1.8196 q40: 1.9141 q50: 1.9863 q60: 2.0640 q70: 2.1425 q80: 2.2339 q90: 2.3203 q100: 2.6037\n", + "one_class_0 2.1309 +- 0.1875 q0: 1.4830 q10: 1.8910 q20: 1.9714 q30: 2.0347 q40: 2.0807 q50: 2.1311 q60: 2.1754 q70: 2.2372 q80: 2.3011 q90: 2.3743 q100: 2.6831\n", + "[one_class_0 CSI 0.3339] [one_class_0 best 0.3339] \n", + "[one_class_mean CSI 0.3339] [one_class_mean best 0.3339] \n", + "0.3339\t0.3339\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# shift_tr : blur\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# res : 450px\n", + "# crop : 0.08\n", + "# blur_sigma : 6\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 6 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma6.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 145, + "id": "5a20ddb8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0029\t0.0062\t0.0034\t0.0030\n", + "weight_shi:\t0.2169\t2.1291\t-0.6997\t-0.6317\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5895074388033098\n", + "CNMC 2.0596 +- 0.2877 q0: 1.0222 q10: 1.6929 q20: 1.8375 q30: 1.9167 q40: 2.0096 q50: 2.0851 q60: 2.1435 q70: 2.2065 q80: 2.2951 q90: 2.4044 q100: 3.0604\n", + "one_class_0 1.9839 +- 0.2326 q0: 1.1067 q10: 1.6760 q20: 1.7908 q30: 1.8756 q40: 1.9429 q50: 2.0082 q60: 2.0584 q70: 2.1101 q80: 2.1732 q90: 2.2584 q100: 3.0116\n", + "[one_class_0 CSI 0.5895] [one_class_0 best 0.5895] \n", + "[one_class_mean CSI 0.5895] [one_class_mean best 0.5895] \n", + "0.5895\t0.5895\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 4\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 4 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma4.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 146, + "id": "f014e06d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0047\t0.0065\t0.0046\t0.0045\n", + "weight_shi:\t0.2645\t-12.1918\t-1.1354\t-0.9111\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.43248488439218546\n", + "CNMC 1.5307 +- 2.2981 q0: -6.2860 q10: -1.3964 q20: -0.1077 q30: 0.5774 q40: 1.2354 q50: 1.7091 q60: 2.1039 q70: 2.7068 q80: 3.3782 q90: 4.4476 q100: 6.9377\n", + "one_class_0 2.0424 +- 1.5916 q0: -5.1678 q10: 0.0924 q20: 0.8505 q30: 1.3834 q40: 1.7445 q50: 2.1476 q60: 2.4484 q70: 2.8574 q80: 3.3216 q90: 3.9483 q100: 6.4052\n", + "[one_class_0 CSI 0.4325] [one_class_0 best 0.4325] \n", + "[one_class_mean CSI 0.4325] [one_class_mean best 0.4325] \n", + "0.4325\t0.4325\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma3.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 148, + "id": "469197e2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0083\t0.0100\t0.0115\t0.0075\n", + "weight_shi:\t2.4798\t0.7962\t-4.3631\t-2.5771\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.37173128145920054\n", + "CNMC 1.8398 +- 0.7107 q0: -1.6740 q10: 0.9741 q20: 1.3853 q30: 1.6153 q40: 1.8033 q50: 1.9486 q60: 2.1053 q70: 2.2304 q80: 2.3601 q90: 2.5741 q100: 3.5645\n", + "one_class_0 2.1409 +- 0.5323 q0: -0.7020 q10: 1.4665 q20: 1.7704 q30: 1.9274 q40: 2.0616 q50: 2.1799 q60: 2.2918 q70: 2.4188 q80: 2.5430 q90: 2.7544 q100: 3.8279\n", + "[one_class_0 CSI 0.3717] [one_class_0 best 0.3717] \n", + "[one_class_mean CSI 0.3717] [one_class_mean best 0.3717] \n", + "0.3717\t0.3717\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma2.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 149, + "id": "b8ccee0f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0078\t0.0116\t0.0095\t0.0106\n", + "weight_shi:\t0.1768\t-0.5198\t-0.4439\t-0.3696\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3392225969475081\n", + "CNMC 1.9808 +- 0.1438 q0: 1.5735 q10: 1.8230 q20: 1.8637 q30: 1.8908 q40: 1.9230 q50: 1.9577 q60: 1.9928 q70: 2.0435 q80: 2.1120 q90: 2.1900 q100: 2.4139\n", + "one_class_0 2.0502 +- 0.1152 q0: 1.7554 q10: 1.9134 q20: 1.9501 q30: 1.9799 q40: 2.0064 q50: 2.0376 q60: 2.0668 q70: 2.1043 q80: 2.1520 q90: 2.2170 q100: 2.4357\n", + "[one_class_0 CSI 0.3392] [one_class_0 best 0.3392] \n", + "[one_class_mean CSI 0.3392] [one_class_mean best 0.3392] \n", + "0.3392\t0.3392\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.5\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 1.5 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma1.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 150, + "id": "3ba56d85", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0021\t0.0031\t0.0026\t0.0026\n", + "weight_shi:\t0.3756\t9.2614\t-0.9536\t-0.8326\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6796440616169901\n", + "CNMC 2.2897 +- 0.8188 q0: -0.1330 q10: 1.2176 q20: 1.6025 q30: 1.8640 q40: 2.0833 q50: 2.2961 q60: 2.5135 q70: 2.7109 q80: 2.9502 q90: 3.3024 q100: 4.9905\n", + "one_class_0 1.8212 +- 0.6476 q0: -0.7580 q10: 1.0215 q20: 1.3188 q30: 1.4821 q40: 1.6494 q50: 1.7908 q60: 1.9457 q70: 2.1147 q80: 2.3124 q90: 2.6421 q100: 4.3585\n", + "[one_class_0 CSI 0.6796] [one_class_0 best 0.6796] \n", + "[one_class_mean CSI 0.6796] [one_class_mean best 0.6796] \n", + "0.6796\t0.6796\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 1 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma1.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "9fd03e0e", + "metadata": {}, + "source": [ + "## other transformations" + ] + }, + { + "cell_type": "code", + "execution_count": 151, + "id": "beda234d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0022\t0.0048\t0.0029\t0.0028\n", + "weight_shi:\t-3.2909\t-2.8657\t12.5482\t8.7034\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5285322922047013\n", + "CNMC 2.0914 +- 0.5227 q0: 0.3459 q10: 1.3730 q20: 1.6394 q30: 1.8216 q40: 1.9820 q50: 2.1814 q60: 2.2788 q70: 2.3999 q80: 2.5247 q90: 2.7229 q100: 3.6842\n", + "one_class_0 2.0687 +- 0.3844 q0: 0.9708 q10: 1.5495 q20: 1.7411 q30: 1.8523 q40: 1.9633 q50: 2.0755 q60: 2.1823 q70: 2.2948 q80: 2.4057 q90: 2.5591 q100: 3.3615\n", + "[one_class_0 CSI 0.5285] [one_class_0 best 0.5285] \n", + "[one_class_mean CSI 0.5285] [one_class_mean best 0.5285] \n", + "0.5285\t0.5285\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : rotation\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type rotation --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_rotation_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 152, + "id": "025aedc5", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0030\t0.0034\t0.0036\t0.0040\n", + "weight_shi:\t-0.0433\t0.5499\t-0.7289\t0.1057\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4115717953392276\n", + "CNMC 2.0156 +- 0.1922 q0: 1.5401 q10: 1.7668 q20: 1.8500 q30: 1.9196 q40: 1.9652 q50: 2.0074 q60: 2.0619 q70: 2.1144 q80: 2.1791 q90: 2.2713 q100: 2.6170\n", + "one_class_0 2.0726 +- 0.1608 q0: 1.6384 q10: 1.8757 q20: 1.9493 q30: 1.9876 q40: 2.0244 q50: 2.0556 q60: 2.0929 q70: 2.1446 q80: 2.2026 q90: 2.3007 q100: 2.7631\n", + "[one_class_0 CSI 0.4116] [one_class_0 best 0.4116] \n", + "[one_class_mean CSI 0.4116] [one_class_mean best 0.4116] \n", + "0.4116\t0.4116\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : cutperm\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type cutperm --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_cutperm_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "7def804c", + "metadata": {}, + "source": [ + "# In-Distribution = ALL" + ] + }, + { + "cell_type": "markdown", + "id": "4d826eb3", + "metadata": {}, + "source": [ + "# Combined shiftings" + ] + }, + { + "cell_type": "code", + "execution_count": 153, + "id": "ed1501cd", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0099\t0.0102\t0.0065\t0.0102\n", + "weight_shi:\t-0.2651\t0.3988\t-0.4352\t-0.9217\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.39018776775134445\n", + "CNMC 1.9696 +- 0.3172 q0: 1.0059 q10: 1.5824 q20: 1.7007 q30: 1.7852 q40: 1.8767 q50: 1.9557 q60: 2.0335 q70: 2.1270 q80: 2.2281 q90: 2.3851 q100: 3.0697\n", + "one_class_1 2.1059 +- 0.3583 q0: 1.1658 q10: 1.6595 q20: 1.8070 q30: 1.9152 q40: 2.0053 q50: 2.0849 q60: 2.1740 q70: 2.2650 q80: 2.3979 q90: 2.5818 q100: 3.4632\n", + "[one_class_1 CSI 0.3902] [one_class_1 best 0.3902] \n", + "[one_class_mean CSI 0.3902] [one_class_mean best 0.3902] \n", + "0.3902\t0.3902\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --distortion_scale 0.75 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 154, + "id": "b471436b", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0067\t0.0129\t0.0065\t0.0086\n", + "weight_shi:\t-0.0850\t0.2249\t0.1729\t0.1702\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4909736780805963\n", + "CNMC 2.1838 +- 0.3670 q0: 0.9506 q10: 1.6677 q20: 1.8649 q30: 2.0015 q40: 2.1242 q50: 2.2193 q60: 2.3160 q70: 2.4125 q80: 2.5084 q90: 2.6376 q100: 3.1795\n", + "one_class_1 2.1670 +- 0.4888 q0: 0.7892 q10: 1.4646 q20: 1.7498 q30: 1.9466 q40: 2.1070 q50: 2.2393 q60: 2.3641 q70: 2.4747 q80: 2.6032 q90: 2.7386 q100: 3.1321\n", + "[one_class_1 CSI 0.4910] [one_class_1 best 0.4910] \n", + "[one_class_mean CSI 0.4910] [one_class_mean best 0.4910] \n", + "0.4910\t0.4910\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --sharpness_factor 5 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 155, + "id": "5c08667d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0058\t0.0071\t0.0060\t0.0060\n", + "weight_shi:\t-0.0229\t0.0795\t0.0649\t0.0666\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.45186742320663564\n", + "CNMC 2.0290 +- 0.0922 q0: 1.7841 q10: 1.9115 q20: 1.9486 q30: 1.9769 q40: 2.0011 q50: 2.0200 q60: 2.0479 q70: 2.0788 q80: 2.1081 q90: 2.1556 q100: 2.4408\n", + "one_class_1 2.0462 +- 0.1034 q0: 1.7679 q10: 1.9185 q20: 1.9546 q30: 1.9876 q40: 2.0171 q50: 2.0410 q60: 2.0679 q70: 2.0985 q80: 2.1328 q90: 2.1914 q100: 2.3683\n", + "[one_class_1 CSI 0.4519] [one_class_1 best 0.4519] \n", + "[one_class_mean CSI 0.4519] [one_class_mean best 0.4519] \n", + "0.4519\t0.4519\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --sharpness_factor 5 --distortion_scale 0.75 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_sharp_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 156, + "id": "e1be886d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0069\t0.0188\t0.0166\t0.0120\n", + "weight_shi:\t-0.1581\t0.1971\t0.2342\t0.3190\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.32064141322071316\n", + "CNMC 1.9454 +- 0.0810 q0: 1.7576 q10: 1.8630 q20: 1.8860 q30: 1.9012 q40: 1.9140 q50: 1.9316 q60: 1.9461 q70: 1.9640 q80: 1.9905 q90: 2.0476 q100: 2.4165\n", + "one_class_1 2.0265 +- 0.1592 q0: 1.7834 q10: 1.8887 q20: 1.9115 q30: 1.9346 q40: 1.9614 q50: 1.9884 q60: 2.0114 q70: 2.0559 q80: 2.1059 q90: 2.2091 q100: 3.1080\n", + "[one_class_1 CSI 0.3206] [one_class_1 best 0.3206] \n", + "[one_class_mean CSI 0.3206] [one_class_mean best 0.3206] \n", + "0.3206\t0.3206\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --sharpness_factor 5 --distortion_scale 0.75 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_randpers_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_sharp_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "d8cd9c5a", + "metadata": {}, + "source": [ + "# Rotation" + ] + }, + { + "cell_type": "code", + "execution_count": 157, + "id": "3f9748c5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0058\t0.0091\t0.0059\t0.0061\n", + "weight_shi:\t-20.2520\t5.6794\t4.4756\t-13.8486\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6155293247855458\n", + "CNMC 2.2941 +- 0.4851 q0: 0.6171 q10: 1.6977 q20: 1.9170 q30: 2.0537 q40: 2.1467 q50: 2.2525 q60: 2.3680 q70: 2.5004 q80: 2.6952 q90: 2.9333 q100: 4.0615\n", + "one_class_1 2.0566 +- 0.6054 q0: 0.2141 q10: 1.2573 q20: 1.5313 q30: 1.7431 q40: 1.8966 q50: 2.0426 q60: 2.2221 q70: 2.3685 q80: 2.6045 q90: 2.8399 q100: 3.9073\n", + "[one_class_1 CSI 0.6155] [one_class_1 best 0.6155] \n", + "[one_class_mean CSI 0.6155] [one_class_mean best 0.6155] \n", + "0.6155\t0.6155\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : rotation\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type rotation --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_rotation_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "ed7a3ca6", + "metadata": {}, + "source": [ + "# Cutperm" + ] + }, + { + "cell_type": "code", + "execution_count": 158, + "id": "47382eef", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0033\t0.0040\t0.0048\t0.0059\n", + "weight_shi:\t-0.0422\t-0.2956\t0.3071\t0.0913\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5637665967854647\n", + "CNMC 2.1340 +- 0.2713 q0: 1.5092 q10: 1.8054 q20: 1.8955 q30: 1.9635 q40: 2.0281 q50: 2.0900 q60: 2.1689 q70: 2.2729 q80: 2.3753 q90: 2.5306 q100: 2.8713\n", + "one_class_1 2.0681 +- 0.3216 q0: 1.3818 q10: 1.6678 q20: 1.7728 q30: 1.8582 q40: 1.9368 q50: 2.0391 q60: 2.1288 q70: 2.2616 q80: 2.3884 q90: 2.5307 q100: 2.8915\n", + "[one_class_1 CSI 0.5638] [one_class_1 best 0.5638] \n", + "[one_class_mean CSI 0.5638] [one_class_mean best 0.5638] \n", + "0.5638\t0.5638\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : cutperm\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type cutperm --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_cutperm_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "e338538b", + "metadata": {}, + "source": [ + "# Rotated Dataset 4" + ] + }, + { + "cell_type": "code", + "execution_count": 69, + "id": "18aa1694", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0089\t0.0071\t0.0082\t0.0060\n", + "weight_shi:\t-0.0826\t0.1155\t0.1144\t0.1138\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6829627857280305\n", + "CNMC 2.0823 +- 0.1590 q0: 1.4861 q10: 1.8867 q20: 1.9512 q30: 1.9962 q40: 2.0434 q50: 2.0844 q60: 2.1284 q70: 2.1653 q80: 2.2150 q90: 2.2784 q100: 2.7066\n", + "one_class_1 1.9798 +- 0.1471 q0: 1.4589 q10: 1.7996 q20: 1.8601 q30: 1.9145 q40: 1.9503 q50: 1.9828 q60: 2.0164 q70: 2.0541 q80: 2.1007 q90: 2.1670 q100: 2.3931\n", + "[one_class_1 CSI 0.6830] [one_class_1 best 0.6830] \n", + "[one_class_mean CSI 0.6830] [one_class_mean best 0.6830] \n", + "0.6830\t0.6830\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC_ROT4\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 64\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 64 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/dataset_rotated_4/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor64.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 70, + "id": "95e84b59", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0076\t0.0081\t0.0080\t0.0086\n", + "weight_shi:\t-0.1382\t1.2588\t2.1567\t0.5287\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3485907290938738\n", + "CNMC 1.8551 +- 0.4346 q0: 0.8263 q10: 1.3471 q20: 1.5032 q30: 1.6156 q40: 1.7119 q50: 1.8134 q60: 1.9097 q70: 2.0306 q80: 2.1600 q90: 2.4047 q100: 4.4743\n", + "one_class_1 2.1133 +- 0.5033 q0: 0.9826 q10: 1.5132 q20: 1.7004 q30: 1.8184 q40: 1.9124 q50: 2.0310 q60: 2.1594 q70: 2.3078 q80: 2.5394 q90: 2.7696 q100: 4.0888\n", + "[one_class_1 CSI 0.3486] [one_class_1 best 0.3486] \n", + "[one_class_mean CSI 0.3486] [one_class_mean best 0.3486] \n", + "0.3486\t0.3486\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC_ROT4\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.75 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/dataset_rotated_4/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.75_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 159, + "id": "982cf5a4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0137\t0.0152\t0.0140\t0.0126\n", + "weight_shi:\t-0.1440\t0.3135\t0.4775\t0.4211\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3167783246741409\n", + "CNMC 1.9366 +- 0.1773 q0: 1.4437 q10: 1.7285 q20: 1.7909 q30: 1.8431 q40: 1.8862 q50: 1.9192 q60: 1.9569 q70: 2.0073 q80: 2.0694 q90: 2.1765 q100: 2.5655\n", + "one_class_1 2.0674 +- 0.2071 q0: 1.6099 q10: 1.8331 q20: 1.8954 q30: 1.9420 q40: 1.9859 q50: 2.0320 q60: 2.0882 q70: 2.1432 q80: 2.2308 q90: 2.3727 q100: 2.8015\n", + "[one_class_1 CSI 0.3168] [one_class_1 best 0.3168] \n", + "[one_class_mean CSI 0.3168] [one_class_mean best 0.3168] \n", + "0.3168\t0.3168\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC_ROT4\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/dataset_rotated_4/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "c9c8f555", + "metadata": {}, + "source": [ + "# Sharpness Factor" + ] + }, + { + "cell_type": "code", + "execution_count": 72, + "id": "ac35a164", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0050\t0.0011\t0.0010\t0.0009\n", + "weight_shi:\t-0.0433\t0.0726\t0.2303\t0.0769\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5375965930382118\n", + "CNMC 1.9981 +- 0.1008 q0: 1.5969 q10: 1.8641 q20: 1.9163 q30: 1.9503 q40: 1.9776 q50: 2.0056 q60: 2.0329 q70: 2.0563 q80: 2.0814 q90: 2.1226 q100: 2.2687\n", + "one_class_1 1.9867 +- 0.1056 q0: 1.6492 q10: 1.8484 q20: 1.8943 q30: 1.9323 q40: 1.9603 q50: 1.9909 q60: 2.0131 q70: 2.0457 q80: 2.0783 q90: 2.1193 q100: 2.2764\n", + "[one_class_1 CSI 0.5376] [one_class_1 best 0.5376] \n", + "[one_class_mean CSI 0.5376] [one_class_mean best 0.5376] \n", + "0.5376\t0.5376\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 4096\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 4096 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor4096.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 73, + "id": "49250ae3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0099\t0.0038\t0.0037\t0.0035\n", + "weight_shi:\t-0.0601\t0.0628\t0.0572\t0.0620\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5004063743809436\n", + "CNMC 2.1367 +- 0.2510 q0: 1.4456 q10: 1.8346 q20: 1.9233 q30: 1.9922 q40: 2.0485 q50: 2.1189 q60: 2.1860 q70: 2.2597 q80: 2.3425 q90: 2.4668 q100: 3.2275\n", + "one_class_1 2.1346 +- 0.3755 q0: 1.1971 q10: 1.6290 q20: 1.8287 q30: 1.9361 q40: 2.0319 q50: 2.1191 q60: 2.2264 q70: 2.3246 q80: 2.4509 q90: 2.6481 q100: 3.1412\n", + "[one_class_1 CSI 0.5004] [one_class_1 best 0.5004] \n", + "[one_class_mean CSI 0.5004] [one_class_mean best 0.5004] \n", + "0.5004\t0.5004\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 2048\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 2048 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor2048.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 74, + "id": "0bd84a7e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0242\t0.0050\t0.0046\t0.0044\n", + "weight_shi:\t-0.0828\t0.0645\t0.0669\t0.0596\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.521132733772876\n", + "CNMC 2.1385 +- 0.2030 q0: 1.5179 q10: 1.8773 q20: 1.9648 q30: 2.0325 q40: 2.0825 q50: 2.1336 q60: 2.1808 q70: 2.2389 q80: 2.3102 q90: 2.3997 q100: 2.7902\n", + "one_class_1 2.1145 +- 0.2767 q0: 1.3283 q10: 1.7725 q20: 1.8865 q30: 1.9760 q40: 2.0428 q50: 2.1166 q60: 2.1976 q70: 2.2709 q80: 2.3529 q90: 2.4697 q100: 2.8054\n", + "[one_class_1 CSI 0.5211] [one_class_1 best 0.5211] \n", + "[one_class_mean CSI 0.5211] [one_class_mean best 0.5211] \n", + "0.5211\t0.5211\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 1024\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 1024 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor1024.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 75, + "id": "7084a03f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0059\t0.0055\t0.0051\t0.0051\n", + "weight_shi:\t-0.0132\t0.0371\t0.0377\t0.0376\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5495593179999798\n", + "CNMC 2.0729 +- 0.0917 q0: 1.7983 q10: 1.9554 q20: 1.9947 q30: 2.0222 q40: 2.0461 q50: 2.0659 q60: 2.0930 q70: 2.1220 q80: 2.1540 q90: 2.1973 q100: 2.3686\n", + "one_class_1 2.0536 +- 0.1203 q0: 1.7288 q10: 1.9078 q20: 1.9506 q30: 1.9885 q40: 2.0186 q50: 2.0481 q60: 2.0860 q70: 2.1206 q80: 2.1548 q90: 2.2130 q100: 2.3754\n", + "[one_class_1 CSI 0.5496] [one_class_1 best 0.5496] \n", + "[one_class_mean CSI 0.5496] [one_class_mean best 0.5496] \n", + "0.5496\t0.5496\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 512\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 512 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor512.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 76, + "id": "7609406d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0033\t0.0016\t0.0015\t0.0015\n", + "weight_shi:\t-0.0626\t0.0548\t0.0482\t0.0476\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5079350611207324\n", + "CNMC 2.1463 +- 0.2056 q0: 1.5154 q10: 1.8902 q20: 1.9661 q30: 2.0278 q40: 2.0864 q50: 2.1497 q60: 2.1995 q70: 2.2608 q80: 2.3272 q90: 2.4144 q100: 2.8480\n", + "one_class_1 2.1363 +- 0.2821 q0: 1.3866 q10: 1.7738 q20: 1.8962 q30: 1.9787 q40: 2.0632 q50: 2.1372 q60: 2.2119 q70: 2.2966 q80: 2.3999 q90: 2.5028 q100: 2.7673\n", + "[one_class_1 CSI 0.5079] [one_class_1 best 0.5079] \n", + "[one_class_mean CSI 0.5079] [one_class_mean best 0.5079] \n", + "0.5079\t0.5079\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 256\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 256 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor256.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 77, + "id": "aad2a734", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0048\t0.0080\t0.0087\t0.0057\n", + "weight_shi:\t-0.0840\t0.0954\t0.0919\t0.0779\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5942041645145282\n", + "CNMC 2.0494 +- 0.1428 q0: 1.7036 q10: 1.8748 q20: 1.9344 q30: 1.9773 q40: 2.0081 q50: 2.0436 q60: 2.0718 q70: 2.1122 q80: 2.1561 q90: 2.2278 q100: 2.7563\n", + "one_class_1 2.0001 +- 0.1729 q0: 1.5526 q10: 1.7899 q20: 1.8590 q30: 1.9109 q40: 1.9589 q50: 1.9977 q60: 2.0327 q70: 2.0653 q80: 2.1332 q90: 2.2229 q100: 2.6275\n", + "[one_class_1 CSI 0.5942] [one_class_1 best 0.5942] \n", + "[one_class_mean CSI 0.5942] [one_class_mean best 0.5942] \n", + "0.5942\t0.5942\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 128\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 128 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor128.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 78, + "id": "eceb0082", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0055\t0.0037\t0.0058\t0.0037\n", + "weight_shi:\t-0.1448\t0.1735\t0.1588\t0.1423\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7104164767720962\n", + "CNMC 2.0883 +- 0.1055 q0: 1.7978 q10: 1.9497 q20: 1.9997 q30: 2.0352 q40: 2.0642 q50: 2.0925 q60: 2.1162 q70: 2.1440 q80: 2.1764 q90: 2.2198 q100: 2.4996\n", + "one_class_1 1.9981 +- 0.1273 q0: 1.6099 q10: 1.8403 q20: 1.8878 q30: 1.9263 q40: 1.9618 q50: 1.9937 q60: 2.0157 q70: 2.0606 q80: 2.1110 q90: 2.1738 q100: 2.4795\n", + "[one_class_1 CSI 0.7104] [one_class_1 best 0.7104] \n", + "[one_class_mean CSI 0.7104] [one_class_mean best 0.7104] \n", + "0.7104\t0.7104\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 64\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 64 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor64.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 79, + "id": "7c881700", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0028\t0.0085\t0.0044\t0.0097\n", + "weight_shi:\t-0.0235\t0.0638\t0.0549\t0.0541\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5937648750746918\n", + "CNMC 2.0002 +- 0.1702 q0: 1.6494 q10: 1.8157 q20: 1.8583 q30: 1.8929 q40: 1.9291 q50: 1.9695 q60: 2.0094 q70: 2.0576 q80: 2.1345 q90: 2.2419 q100: 2.8225\n", + "one_class_1 1.9446 +- 0.1597 q0: 1.5613 q10: 1.7697 q20: 1.8122 q30: 1.8531 q40: 1.8816 q50: 1.9188 q60: 1.9583 q70: 2.0091 q80: 2.0737 q90: 2.1568 q100: 2.5480\n", + "[one_class_1 CSI 0.5938] [one_class_1 best 0.5938] \n", + "[one_class_mean CSI 0.5938] [one_class_mean best 0.5938] \n", + "0.5938\t0.5938\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 32\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 32 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor32.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 80, + "id": "afaa2706", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0030\t0.0048\t0.0042\t0.0054\n", + "weight_shi:\t-0.0352\t0.0883\t0.0761\t0.0693\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5747906095868907\n", + "CNMC 2.0814 +- 0.1200 q0: 1.7198 q10: 1.9284 q20: 1.9790 q30: 2.0137 q40: 2.0462 q50: 2.0794 q60: 2.1084 q70: 2.1462 q80: 2.1929 q90: 2.2480 q100: 2.3614\n", + "one_class_1 2.0319 +- 0.1736 q0: 1.4826 q10: 1.8007 q20: 1.8786 q30: 1.9545 q40: 1.9968 q50: 2.0343 q60: 2.0956 q70: 2.1409 q80: 2.1917 q90: 2.2512 q100: 2.3875\n", + "[one_class_1 CSI 0.5748] [one_class_1 best 0.5748] \n", + "[one_class_mean CSI 0.5748] [one_class_mean best 0.5748] \n", + "0.5748\t0.5748\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 16\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 16 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor16.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 81, + "id": "374eec9c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0040\t0.0039\t0.0044\t0.0039\n", + "weight_shi:\t-0.0360\t0.1191\t0.0847\t0.0773\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.47723417292052783\n", + "CNMC 2.1169 +- 0.2902 q0: 1.3060 q10: 1.7390 q20: 1.8614 q30: 1.9501 q40: 2.0312 q50: 2.1085 q60: 2.1828 q70: 2.2782 q80: 2.3917 q90: 2.5189 q100: 3.0332\n", + "one_class_1 2.1411 +- 0.3676 q0: 1.2368 q10: 1.6509 q20: 1.8257 q30: 1.9349 q40: 2.0555 q50: 2.1498 q60: 2.2467 q70: 2.3477 q80: 2.4742 q90: 2.6155 q100: 3.2105\n", + "[one_class_1 CSI 0.4772] [one_class_1 best 0.4772] \n", + "[one_class_mean CSI 0.4772] [one_class_mean best 0.4772] \n", + "0.4772\t0.4772\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 8 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor8.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 82, + "id": "2b907319", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0120\t0.0137\t0.0113\t0.0151\n", + "weight_shi:\t-0.0230\t0.0744\t0.0702\t0.0797\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7440512360870578\n", + "CNMC 2.0897 +- 0.1280 q0: 1.6279 q10: 1.9253 q20: 1.9937 q30: 2.0344 q40: 2.0646 q50: 2.0929 q60: 2.1235 q70: 2.1514 q80: 2.1925 q90: 2.2452 q100: 2.5109\n", + "one_class_1 1.9564 +- 0.1648 q0: 1.2763 q10: 1.7402 q20: 1.8197 q30: 1.8821 q40: 1.9240 q50: 1.9604 q60: 2.0013 q70: 2.0434 q80: 2.0908 q90: 2.1604 q100: 2.4535\n", + "[one_class_1 CSI 0.7441] [one_class_1 best 0.7441] \n", + "[one_class_mean CSI 0.7441] [one_class_mean best 0.7441] \n", + "0.7441\t0.7441\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 5\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 5 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor5.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 83, + "id": "eadc9f63", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0044\t0.0050\t0.0036\t0.0045\n", + "weight_shi:\t-0.0174\t0.0520\t0.0435\t0.0457\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6856960015799229\n", + "CNMC 2.0337 +- 0.0637 q0: 1.7790 q10: 1.9526 q20: 1.9802 q30: 2.0010 q40: 2.0182 q50: 2.0348 q60: 2.0490 q70: 2.0674 q80: 2.0882 q90: 2.1155 q100: 2.2475\n", + "one_class_1 1.9842 +- 0.0803 q0: 1.6728 q10: 1.8819 q20: 1.9195 q30: 1.9483 q40: 1.9679 q50: 1.9875 q60: 2.0072 q70: 2.0236 q80: 2.0479 q90: 2.0840 q100: 2.2335\n", + "[one_class_1 CSI 0.6857] [one_class_1 best 0.6857] \n", + "[one_class_mean CSI 0.6857] [one_class_mean best 0.6857] \n", + "0.6857\t0.6857\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 4\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 4 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor4.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "id": "66a30bac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0096\t0.0088\t0.0090\t0.0096\n", + "weight_shi:\t-0.0320\t0.1007\t0.1076\t0.0998\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.49014067389785193\n", + "CNMC 2.0877 +- 0.2810 q0: 1.3173 q10: 1.7265 q20: 1.8460 q30: 1.9306 q40: 2.0048 q50: 2.0768 q60: 2.1398 q70: 2.2252 q80: 2.3252 q90: 2.4627 q100: 3.0835\n", + "one_class_1 2.0957 +- 0.3295 q0: 1.1248 q10: 1.6700 q20: 1.8185 q30: 1.9197 q40: 2.0144 q50: 2.0849 q60: 2.1813 q70: 2.2611 q80: 2.3718 q90: 2.5219 q100: 3.0920\n", + "[one_class_1 CSI 0.4901] [one_class_1 best 0.4901] \n", + "[one_class_mean CSI 0.4901] [one_class_mean best 0.4901] \n", + "0.4901\t0.4901\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 3 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor3.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "id": "e8fde266", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0051\t0.0046\t0.0048\t0.0045\n", + "weight_shi:\t-0.0137\t0.0407\t0.0450\t0.0411\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4467181154356435\n", + "CNMC 2.0176 +- 0.0689 q0: 1.7903 q10: 1.9308 q20: 1.9620 q30: 1.9833 q40: 1.9998 q50: 2.0144 q60: 2.0329 q70: 2.0534 q80: 2.0752 q90: 2.1054 q100: 2.2461\n", + "one_class_1 2.0300 +- 0.0917 q0: 1.7417 q10: 1.9114 q20: 1.9580 q30: 1.9866 q40: 2.0089 q50: 2.0337 q60: 2.0591 q70: 2.0798 q80: 2.1052 q90: 2.1409 q100: 2.2672\n", + "[one_class_1 CSI 0.4467] [one_class_1 best 0.4467] \n", + "[one_class_mean CSI 0.4467] [one_class_mean best 0.4467] \n", + "0.4467\t0.4467\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 2 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "bac55a6b", + "metadata": {}, + "source": [ + "# Random Perspective" + ] + }, + { + "cell_type": "code", + "execution_count": 86, + "id": "acb8e0cf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0046\t0.0037\t0.0045\t0.0046\n", + "weight_shi:\t0.1028\t-0.1896\t-0.2910\t-0.3483\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6587695844600411\n", + "CNMC 2.0386 +- 0.2243 q0: 1.1465 q10: 1.7463 q20: 1.8820 q30: 1.9641 q40: 2.0161 q50: 2.0690 q60: 2.1137 q70: 2.1678 q80: 2.2188 q90: 2.2888 q100: 2.5369\n", + "one_class_1 1.8805 +- 0.3066 q0: 0.7440 q10: 1.4384 q20: 1.6744 q30: 1.7753 q40: 1.8640 q50: 1.9389 q60: 1.9992 q70: 2.0643 q80: 2.1327 q90: 2.2159 q100: 2.4966\n", + "[one_class_1 CSI 0.6588] [one_class_1 best 0.6588] \n", + "[one_class_mean CSI 0.6588] [one_class_mean best 0.6588] \n", + "0.6588\t0.6588\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.95\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.95 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.95_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 87, + "id": "38406c45", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0036\t0.0043\t0.0044\t0.0045\n", + "weight_shi:\t0.0940\t-0.3354\t-0.3010\t-0.4613\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6866549691611218\n", + "CNMC 2.0701 +- 0.1458 q0: 1.3353 q10: 1.9029 q20: 1.9897 q30: 2.0319 q40: 2.0558 q50: 2.0795 q60: 2.1028 q70: 2.1319 q80: 2.1715 q90: 2.2439 q100: 2.4895\n", + "one_class_1 1.9579 +- 0.2070 q0: 0.9880 q10: 1.6843 q20: 1.8208 q30: 1.8972 q40: 1.9508 q50: 1.9923 q60: 2.0252 q70: 2.0622 q80: 2.0986 q90: 2.1886 q100: 2.4493\n", + "[one_class_1 CSI 0.6867] [one_class_1 best 0.6867] \n", + "[one_class_mean CSI 0.6867] [one_class_mean best 0.6867] \n", + "0.6867\t0.6867\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.9\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.9 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.9_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 88, + "id": "79e43776", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0058\t0.0098\t0.0051\t0.0075\n", + "weight_shi:\t0.7573\t0.7158\t-0.4403\t3.1769\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6343618023273478\n", + "CNMC 2.2964 +- 0.4564 q0: 0.6867 q10: 1.7046 q20: 1.9085 q30: 2.0619 q40: 2.1922 q50: 2.3256 q60: 2.4368 q70: 2.5468 q80: 2.6813 q90: 2.8610 q100: 3.7183\n", + "one_class_1 1.9670 +- 0.7117 q0: -1.6022 q10: 1.0639 q20: 1.4591 q30: 1.7160 q40: 1.8807 q50: 2.0547 q60: 2.2023 q70: 2.3776 q80: 2.5617 q90: 2.7902 q100: 3.2754\n", + "[one_class_1 CSI 0.6344] [one_class_1 best 0.6344] \n", + "[one_class_mean CSI 0.6344] [one_class_mean best 0.6344] \n", + "0.6344\t0.6344\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.85\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.85 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.85_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 89, + "id": "b5045a90", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0044\t0.0059\t0.0035\t0.0046\n", + "weight_shi:\t0.1149\t-0.5921\t-0.2913\t-0.4212\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6980790265244736\n", + "CNMC 2.0586 +- 0.0926 q0: 1.6907 q10: 1.9416 q20: 1.9891 q30: 2.0146 q40: 2.0367 q50: 2.0567 q60: 2.0821 q70: 2.1041 q80: 2.1311 q90: 2.1727 q100: 2.4236\n", + "one_class_1 1.9890 +- 0.1129 q0: 1.6747 q10: 1.8586 q20: 1.8947 q30: 1.9326 q40: 1.9579 q50: 1.9848 q60: 2.0103 q70: 2.0342 q80: 2.0767 q90: 2.1413 q100: 2.4328\n", + "[one_class_1 CSI 0.6981] [one_class_1 best 0.6981] \n", + "[one_class_mean CSI 0.6981] [one_class_mean best 0.6981] \n", + "0.6981\t0.6981\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.8 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.8_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 90, + "id": "5d4659ac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0020\t0.0024\t0.0019\t0.0028\n", + "weight_shi:\t0.0839\t-0.1992\t-0.1714\t-0.2720\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7161709152411915\n", + "CNMC 2.0766 +- 0.1562 q0: 1.4380 q10: 1.8760 q20: 1.9670 q30: 2.0275 q40: 2.0724 q50: 2.1054 q60: 2.1342 q70: 2.1645 q80: 2.2012 q90: 2.2452 q100: 2.4108\n", + "one_class_1 1.9367 +- 0.2053 q0: 1.1357 q10: 1.6437 q20: 1.7718 q30: 1.8741 q40: 1.9299 q50: 1.9756 q60: 2.0202 q70: 2.0576 q80: 2.1027 q90: 2.1710 q100: 2.3247\n", + "[one_class_1 CSI 0.7162] [one_class_1 best 0.7162] \n", + "[one_class_mean CSI 0.7162] [one_class_mean best 0.7162] \n", + "0.7162\t0.7162\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.75 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.75_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "id": "43c01d76", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0049\t0.0060\t0.0045\t0.0072\n", + "weight_shi:\t-1.4937\t0.4193\t-0.5923\t0.9519\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.272612645459241\n", + "CNMC 1.3428 +- 1.3765 q0: -2.6822 q10: -0.1317 q20: 0.2430 q30: 0.5766 q40: 0.8777 q50: 1.1651 q60: 1.4791 q70: 1.7858 q80: 2.3163 q90: 3.1566 q100: 7.5281\n", + "one_class_1 2.6219 +- 1.7311 q0: -1.5026 q10: 0.6189 q20: 1.1608 q30: 1.6420 q40: 2.0065 q50: 2.4546 q60: 2.8590 q70: 3.3026 q80: 3.9411 q90: 5.0125 q100: 8.8420\n", + "[one_class_1 CSI 0.2726] [one_class_1 best 0.2726] \n", + "[one_class_mean CSI 0.2726] [one_class_mean best 0.2726] \n", + "0.2726\t0.2726\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.6\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.6 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.6_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 92, + "id": "b3c2bb68", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0040\t0.0080\t0.0069\t0.0094\n", + "weight_shi:\t0.2243\t2.4831\t-1.1810\t8.3228\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.28934109115952156\n", + "CNMC 1.5171 +- 0.6417 q0: -1.0892 q10: 0.6963 q20: 1.0163 q30: 1.2147 q40: 1.3800 q50: 1.5239 q60: 1.6610 q70: 1.8232 q80: 2.0413 q90: 2.3147 q100: 3.5528\n", + "one_class_1 2.0698 +- 0.8043 q0: -0.3686 q10: 0.9775 q20: 1.4094 q30: 1.6626 q40: 1.9406 q50: 2.1430 q60: 2.3291 q70: 2.4725 q80: 2.7776 q90: 3.0563 q100: 4.2509\n", + "[one_class_1 CSI 0.2893] [one_class_1 best 0.2893] \n", + "[one_class_mean CSI 0.2893] [one_class_mean best 0.2893] \n", + "0.2893\t0.2893\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.3 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.3_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "5cfed222", + "metadata": {}, + "source": [ + "# Color Distortion = 0.8" + ] + }, + { + "cell_type": "markdown", + "id": "009f41d0", + "metadata": {}, + "source": [ + "## Examine crop" + ] + }, + { + "cell_type": "code", + "execution_count": 196, + "id": "0c216c1d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0109\t0.0127\t0.0131\t0.0112\n", + "weight_shi:\t-0.3601\t0.8696\t0.8266\t1.2633\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3773116499053059\n", + "CNMC 1.9315 +- 0.0895 q0: 1.6857 q10: 1.8069 q20: 1.8435 q30: 1.8794 q40: 1.9068 q50: 1.9359 q60: 1.9582 q70: 1.9849 q80: 2.0138 q90: 2.0465 q100: 2.2963\n", + "one_class_1 1.9880 +- 0.1336 q0: 1.7198 q10: 1.8075 q20: 1.8598 q30: 1.9006 q40: 1.9419 q50: 1.9827 q60: 2.0260 q70: 2.0729 q80: 2.1099 q90: 2.1645 q100: 2.4634\n", + "[one_class_1 CSI 0.3773] [one_class_1 best 0.3773] \n", + "[one_class_mean CSI 0.3773] [one_class_mean best 0.3773] \n", + "0.3773\t0.3773\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.5\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.5 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.5_color_dist0.8_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 197, + "id": "6320eef5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0076\t0.0076\t0.0074\t0.0074\n", + "weight_shi:\t0.9058\t0.5362\t0.6368\t-14.1887\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5917127477491163\n", + "CNMC 2.8214 +- 1.0315 q0: -1.2486 q10: 1.3987 q20: 1.9532 q30: 2.3459 q40: 2.7058 q50: 2.9513 q60: 3.1875 q70: 3.4447 q80: 3.7111 q90: 4.0288 q100: 5.9040\n", + "one_class_1 2.3812 +- 1.3314 q0: -2.1268 q10: 0.5397 q20: 1.2181 q30: 1.7456 q40: 2.2246 q50: 2.5793 q60: 2.9176 q70: 3.1949 q80: 3.5267 q90: 3.9124 q100: 4.9106\n", + "[one_class_1 CSI 0.5917] [one_class_1 best 0.5917] \n", + "[one_class_mean CSI 0.5917] [one_class_mean best 0.5917] \n", + "0.5917\t0.5917\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.3\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.3 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.3_color_dist0.8_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 198, + "id": "451c90e5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0110\t0.0071\t0.0102\t0.0101\n", + "weight_shi:\t-0.2335\t0.3455\t0.5920\t0.5756\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4148462107171432\n", + "CNMC 1.8587 +- 0.2019 q0: 1.3001 q10: 1.5984 q20: 1.6837 q30: 1.7477 q40: 1.8032 q50: 1.8568 q60: 1.9066 q70: 1.9618 q80: 2.0281 q90: 2.1033 q100: 2.4803\n", + "one_class_1 1.9549 +- 0.3011 q0: 1.3374 q10: 1.5829 q20: 1.6833 q30: 1.7648 q40: 1.8483 q50: 1.9432 q60: 2.0105 q70: 2.0860 q80: 2.2017 q90: 2.3676 q100: 3.0008\n", + "[one_class_1 CSI 0.4148] [one_class_1 best 0.4148] \n", + "[one_class_mean CSI 0.4148] [one_class_mean best 0.4148] \n", + "0.4148\t0.4148\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.02\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.02 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.02_color_dist0.8_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 199, + "id": "54fef60e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0055\t0.0047\t0.0063\t0.0070\n", + "weight_shi:\t-1.5156\t2.2142\t13.3925\t216.9532\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3877887663435927\n", + "CNMC -9.2248 +- 14.9978 q0: -31.8010 q10: -22.8496 q20: -20.5631 q30: -18.8369 q40: -16.1600 q50: -13.7478 q60: -10.1906 q70: -5.6572 q80: 0.0581 q90: 9.1230 q100: 77.4578\n", + "one_class_1 0.7817 +- 24.0001 q0: -33.6751 q10: -22.8505 q20: -20.2689 q30: -16.3248 q40: -11.6706 q50: -5.3667 q60: 0.3825 q70: 6.7728 q80: 17.5805 q90: 39.7293 q100: 83.7649\n", + "[one_class_1 CSI 0.3878] [one_class_1 best 0.3878] \n", + "[one_class_mean CSI 0.3878] [one_class_mean best 0.3878] \n", + "0.3878\t0.3878\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.008\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.008 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.008_color_dist0.8_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "2dccb685", + "metadata": {}, + "source": [ + "## Examine blur_sigma" + ] + }, + { + "cell_type": "code", + "execution_count": 200, + "id": "0c13892c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0059\t0.0053\t0.0052\t0.0054\n", + "weight_shi:\t-0.0908\t0.2339\t0.2553\t0.2459\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.46215401209248624\n", + "CNMC 1.9646 +- 0.0814 q0: 1.7239 q10: 1.8537 q20: 1.8937 q30: 1.9247 q40: 1.9510 q50: 1.9695 q60: 1.9918 q70: 2.0122 q80: 2.0334 q90: 2.0642 q100: 2.1895\n", + "one_class_1 1.9790 +- 0.1048 q0: 1.6906 q10: 1.8438 q20: 1.8841 q30: 1.9178 q40: 1.9505 q50: 1.9783 q60: 2.0103 q70: 2.0393 q80: 2.0700 q90: 2.1155 q100: 2.2617\n", + "[one_class_1 CSI 0.4622] [one_class_1 best 0.4622] \n", + "[one_class_mean CSI 0.4622] [one_class_mean best 0.4622] \n", + "0.4622\t0.4622\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma40.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 201, + "id": "7b24db11", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0172\t0.0135\t0.0226\t0.0192\n", + "weight_shi:\t-0.0741\t0.1495\t0.1978\t0.1718\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4793875773503884\n", + "CNMC 1.9531 +- 0.1138 q0: 1.6474 q10: 1.8092 q20: 1.8542 q30: 1.8878 q40: 1.9235 q50: 1.9532 q60: 1.9802 q70: 2.0102 q80: 2.0447 q90: 2.0976 q100: 2.3859\n", + "one_class_1 1.9692 +- 0.1523 q0: 1.6030 q10: 1.7796 q20: 1.8318 q30: 1.8785 q40: 1.9130 q50: 1.9548 q60: 1.9983 q70: 2.0455 q80: 2.0996 q90: 2.1848 q100: 2.3561\n", + "[one_class_1 CSI 0.4794] [one_class_1 best 0.4794] \n", + "[one_class_mean CSI 0.4794] [one_class_mean best 0.4794] \n", + "0.4794\t0.4794\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 20\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 20 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma20.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 202, + "id": "352c0a41", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0025\t0.0040\t0.0022\t0.0025\n", + "weight_shi:\t1.2410\t0.6755\t-1.1582\t-3.5877\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6594658645520007\n", + "CNMC 2.1628 +- 0.3004 q0: 0.7838 q10: 1.7714 q20: 1.9641 q30: 2.0468 q40: 2.1213 q50: 2.1830 q60: 2.2509 q70: 2.3213 q80: 2.4001 q90: 2.5014 q100: 3.3713\n", + "one_class_1 1.9514 +- 0.4284 q0: -0.1104 q10: 1.4546 q20: 1.6765 q30: 1.7941 q40: 1.8892 q50: 2.0071 q60: 2.0917 q70: 2.1785 q80: 2.2711 q90: 2.4356 q100: 2.9090\n", + "[one_class_1 CSI 0.6595] [one_class_1 best 0.6595] \n", + "[one_class_mean CSI 0.6595] [one_class_mean best 0.6595] \n", + "0.6595\t0.6595\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 6\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 6 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma6.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 203, + "id": "d22c485a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0050\t0.0078\t0.0052\t0.0062\n", + "weight_shi:\t0.4106\t0.4163\t-2.7425\t-3.5688\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6272255643666637\n", + "CNMC 2.1200 +- 0.6206 q0: -0.0284 q10: 1.2949 q20: 1.5957 q30: 1.8155 q40: 2.0312 q50: 2.1755 q60: 2.3270 q70: 2.4960 q80: 2.6611 q90: 2.8534 q100: 4.1276\n", + "one_class_1 1.8269 +- 0.6667 q0: -0.9521 q10: 0.9740 q20: 1.3397 q30: 1.5917 q40: 1.7532 q50: 1.8870 q60: 2.0460 q70: 2.2002 q80: 2.3477 q90: 2.5711 q100: 3.5803\n", + "[one_class_1 CSI 0.6272] [one_class_1 best 0.6272] \n", + "[one_class_mean CSI 0.6272] [one_class_mean best 0.6272] \n", + "0.6272\t0.6272\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 4\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 4 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma4.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 204, + "id": "00a8d2ac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0047\t0.0072\t0.0056\t0.0047\n", + "weight_shi:\t0.3757\t1.6655\t5.1831\t-1.0361\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.49718323053707253\n", + "CNMC 1.9914 +- 0.1852 q0: 1.3248 q10: 1.7612 q20: 1.8463 q30: 1.9021 q40: 1.9490 q50: 1.9936 q60: 2.0285 q70: 2.0851 q80: 2.1355 q90: 2.2233 q100: 2.6945\n", + "one_class_1 1.9965 +- 0.2132 q0: 1.1855 q10: 1.7487 q20: 1.8267 q30: 1.8843 q40: 1.9394 q50: 1.9881 q60: 2.0429 q70: 2.1000 q80: 2.1632 q90: 2.2481 q100: 2.8524\n", + "[one_class_1 CSI 0.4972] [one_class_1 best 0.4972] \n", + "[one_class_mean CSI 0.4972] [one_class_mean best 0.4972] \n", + "0.4972\t0.4972\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 3\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma3.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 205, + "id": "cdab5a91", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0019\t0.0026\t0.0019\t0.0022\n", + "weight_shi:\t0.2520\t-1.0379\t-0.8245\t-0.8299\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7393317230273752\n", + "CNMC 2.1093 +- 0.1696 q0: 1.2392 q10: 1.9016 q20: 2.0068 q30: 2.0585 q40: 2.1066 q50: 2.1333 q60: 2.1667 q70: 2.1958 q80: 2.2352 q90: 2.2885 q100: 2.5315\n", + "one_class_1 1.9282 +- 0.2660 q0: 0.4865 q10: 1.6153 q20: 1.7843 q30: 1.8731 q40: 1.9295 q50: 1.9714 q60: 2.0108 q70: 2.0668 q80: 2.1224 q90: 2.2106 q100: 2.5011\n", + "[one_class_1 CSI 0.7393] [one_class_1 best 0.7393] \n", + "[one_class_mean CSI 0.7393] [one_class_mean best 0.7393] \n", + "0.7393\t0.7393\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 206, + "id": "76bdab2e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0044\t0.0059\t0.0046\t0.0046\n", + "weight_shi:\t0.2676\t-0.5492\t-0.7697\t-0.6319\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6709070124267007\n", + "CNMC 2.0490 +- 0.0659 q0: 1.6783 q10: 1.9728 q20: 1.9995 q30: 2.0151 q40: 2.0347 q50: 2.0501 q60: 2.0654 q70: 2.0818 q80: 2.1012 q90: 2.1296 q100: 2.2543\n", + "one_class_1 1.9948 +- 0.1054 q0: 1.5066 q10: 1.8893 q20: 1.9323 q30: 1.9563 q40: 1.9732 q50: 1.9993 q60: 2.0219 q70: 2.0484 q80: 2.0819 q90: 2.1226 q100: 2.2211\n", + "[one_class_1 CSI 0.6709] [one_class_1 best 0.6709] \n", + "[one_class_mean CSI 0.6709] [one_class_mean best 0.6709] \n", + "0.6709\t0.6709\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.5\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 1.5 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma1.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 207, + "id": "0c1efb9f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0058\t0.0159\t0.0080\t0.0086\n", + "weight_shi:\t0.5438\t-2.8363\t-21.1928\t-1.9421\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6621902186572681\n", + "CNMC 2.2408 +- 0.8449 q0: -0.8843 q10: 1.1519 q20: 1.6038 q30: 1.9076 q40: 2.1231 q50: 2.3416 q60: 2.5656 q70: 2.7323 q80: 2.9607 q90: 3.2092 q100: 3.9264\n", + "one_class_1 1.6402 +- 1.1251 q0: -2.9414 q10: 0.0207 q20: 0.8939 q30: 1.3058 q40: 1.6627 q50: 1.9102 q60: 2.1044 q70: 2.2975 q80: 2.5539 q90: 2.8038 q100: 3.7386\n", + "[one_class_1 CSI 0.6622] [one_class_1 best 0.6622] \n", + "[one_class_mean CSI 0.6622] [one_class_mean best 0.6622] \n", + "0.6622\t0.6622\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 1 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "f676267b", + "metadata": {}, + "source": [ + "# Color Distortion = 1" + ] + }, + { + "cell_type": "markdown", + "id": "744297b9", + "metadata": {}, + "source": [ + "## Examine crop" + ] + }, + { + "cell_type": "code", + "execution_count": 208, + "id": "21a87be2", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0061\t0.0065\t0.0065\t0.0056\n", + "weight_shi:\t1.6932\t-31.1268\t15.0080\t-10.2414\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7102132895816242\n", + "CNMC 2.2522 +- 0.5226 q0: -0.3755 q10: 1.6169 q20: 1.9232 q30: 2.1160 q40: 2.2220 q50: 2.3245 q60: 2.4225 q70: 2.5144 q80: 2.6312 q90: 2.8060 q100: 3.9139\n", + "one_class_1 1.8127 +- 0.7110 q0: -1.7832 q10: 0.9329 q20: 1.3309 q30: 1.6150 q40: 1.7793 q50: 1.9225 q60: 2.0429 q70: 2.1887 q80: 2.3378 q90: 2.5668 q100: 3.4155\n", + "[one_class_1 CSI 0.7102] [one_class_1 best 0.7102] \n", + "[one_class_mean CSI 0.7102] [one_class_mean best 0.7102] \n", + "0.7102\t0.7102\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.5\n", + "# blur_sigma : 2\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.5 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.5_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 209, + "id": "8dd1d6d5", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0092\t0.0099\t0.0099\t0.0096\n", + "weight_shi:\t0.5734\t-1.4904\t-1.4266\t-2.6760\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5938636202513699\n", + "CNMC 2.0102 +- 0.1072 q0: 1.4470 q10: 1.8844 q20: 1.9436 q30: 1.9761 q40: 2.0084 q50: 2.0343 q60: 2.0578 q70: 2.0757 q80: 2.0944 q90: 2.1139 q100: 2.2014\n", + "one_class_1 1.9687 +- 0.1365 q0: 1.2909 q10: 1.8035 q20: 1.8848 q30: 1.9370 q40: 1.9730 q50: 2.0035 q60: 2.0287 q70: 2.0532 q80: 2.0725 q90: 2.0980 q100: 2.1942\n", + "[one_class_1 CSI 0.5939] [one_class_1 best 0.5939] \n", + "[one_class_mean CSI 0.5939] [one_class_mean best 0.5939] \n", + "0.5939\t0.5939\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.3\n", + "# blur_sigma : 2\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.3 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.3_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 210, + "id": "80437a6c", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0086\t0.0090\t0.0096\t0.0084\n", + "weight_shi:\t-0.6178\t0.6564\t1.4537\t1.9758\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.40624398667193307\n", + "CNMC 1.9234 +- 0.1679 q0: 1.2990 q10: 1.6940 q20: 1.7902 q30: 1.8538 q40: 1.9027 q50: 1.9438 q60: 1.9742 q70: 2.0088 q80: 2.0543 q90: 2.1136 q100: 2.6046\n", + "one_class_1 1.9913 +- 0.2119 q0: 1.3411 q10: 1.7247 q20: 1.8161 q30: 1.8739 q40: 1.9369 q50: 1.9987 q60: 2.0415 q70: 2.0979 q80: 2.1553 q90: 2.2420 q100: 2.6629\n", + "[one_class_1 CSI 0.4062] [one_class_1 best 0.4062] \n", + "[one_class_mean CSI 0.4062] [one_class_mean best 0.4062] \n", + "0.4062\t0.4062\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.02\n", + "# blur_sigma : 2\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.02 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.02_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 211, + "id": "5ee4b03d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0077\t0.0063\t0.0079\t0.0085\n", + "weight_shi:\t-0.5622\t1.4395\t2.1736\t5.1802\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.40242330791278014\n", + "CNMC 1.7715 +- 0.3123 q0: 1.0132 q10: 1.4352 q20: 1.5132 q30: 1.5847 q40: 1.6416 q50: 1.7219 q60: 1.7995 q70: 1.8923 q80: 2.0104 q90: 2.1944 q100: 3.1272\n", + "one_class_1 1.9377 +- 0.4535 q0: 1.0669 q10: 1.4215 q20: 1.5153 q30: 1.6260 q40: 1.7391 q50: 1.8745 q60: 1.9976 q70: 2.1337 q80: 2.2999 q90: 2.5968 q100: 3.2364\n", + "[one_class_1 CSI 0.4024] [one_class_1 best 0.4024] \n", + "[one_class_mean CSI 0.4024] [one_class_mean best 0.4024] \n", + "0.4024\t0.4024\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.008\n", + "# blur_sigma : 2\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.008 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.008_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "3993fc92", + "metadata": {}, + "source": [ + "## Examine blur_sigma" + ] + }, + { + "cell_type": "code", + "execution_count": 212, + "id": "d11c9dcd", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0118\t0.0082\t0.0118\t0.0109\n", + "weight_shi:\t-0.5332\t0.3382\t1.2635\t1.1178\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.513900282563121\n", + "CNMC 1.8224 +- 0.5573 q0: 0.4695 q10: 1.0940 q20: 1.3195 q30: 1.5008 q40: 1.6572 q50: 1.8032 q60: 1.9693 q70: 2.1209 q80: 2.3099 q90: 2.5687 q100: 3.4860\n", + "one_class_1 1.8135 +- 0.7140 q0: 0.2666 q10: 0.8849 q20: 1.1651 q30: 1.3949 q40: 1.5485 q50: 1.7703 q60: 1.9382 q70: 2.1803 q80: 2.4559 q90: 2.7728 q100: 4.0059\n", + "[one_class_1 CSI 0.5139] [one_class_1 best 0.5139] \n", + "[one_class_mean CSI 0.5139] [one_class_mean best 0.5139] \n", + "0.5139\t0.5139\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_40.0_resize_factor_0.08_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 213, + "id": "b5ffde5e", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0083\t0.0108\t0.0081\t0.0091\n", + "weight_shi:\t-0.0827\t0.1462\t0.2242\t0.2133\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5305926482950001\n", + "CNMC 1.9788 +- 0.0509 q0: 1.8514 q10: 1.9121 q20: 1.9387 q30: 1.9537 q40: 1.9657 q50: 1.9801 q60: 1.9904 q70: 2.0027 q80: 2.0193 q90: 2.0412 q100: 2.1780\n", + "one_class_1 1.9783 +- 0.0729 q0: 1.8180 q10: 1.8943 q20: 1.9205 q30: 1.9377 q40: 1.9541 q50: 1.9673 q60: 1.9856 q70: 2.0070 q80: 2.0322 q90: 2.0712 q100: 2.2575\n", + "[one_class_1 CSI 0.5306] [one_class_1 best 0.5306] \n", + "[one_class_mean CSI 0.5306] [one_class_mean best 0.5306] \n", + "0.5306\t0.5306\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 20\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 20 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_20.0_resize_factor_0.08_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 214, + "id": "46c0a5be", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0121\t0.0090\t0.0108\t0.0115\n", + "weight_shi:\t-0.1191\t0.1866\t0.3505\t0.3025\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5126887552031113\n", + "CNMC 1.9350 +- 0.1456 q0: 1.5801 q10: 1.7447 q20: 1.8072 q30: 1.8557 q40: 1.8955 q50: 1.9330 q60: 1.9678 q70: 2.0099 q80: 2.0570 q90: 2.1285 q100: 2.4351\n", + "one_class_1 1.9410 +- 0.1925 q0: 1.5534 q10: 1.7034 q20: 1.7718 q30: 1.8241 q40: 1.8672 q50: 1.9158 q60: 1.9602 q70: 2.0200 q80: 2.1069 q90: 2.2114 q100: 2.5473\n", + "[one_class_1 CSI 0.5127] [one_class_1 best 0.5127] \n", + "[one_class_mean CSI 0.5127] [one_class_mean best 0.5127] \n", + "0.5127\t0.5127\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 6\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 6 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_6.0_resize_factor_0.08_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 215, + "id": "9c074889", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0091\t0.0056\t0.0092\t0.0077\n", + "weight_shi:\t1.7473\t1.4099\t-3.2623\t7.2654\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3547040683012791\n", + "CNMC 1.6471 +- 0.5245 q0: 0.2048 q10: 0.9772 q20: 1.1761 q30: 1.3840 q40: 1.5410 q50: 1.6358 q60: 1.7534 q70: 1.8764 q80: 2.0524 q90: 2.3159 q100: 3.4153\n", + "one_class_1 2.0065 +- 0.7329 q0: 0.2362 q10: 1.0671 q20: 1.3288 q30: 1.5682 q40: 1.7685 q50: 1.9697 q60: 2.1593 q70: 2.3883 q80: 2.6860 q90: 3.0258 q100: 4.4012\n", + "[one_class_1 CSI 0.3547] [one_class_1 best 0.3547] \n", + "[one_class_mean CSI 0.3547] [one_class_mean best 0.3547] \n", + "0.3547\t0.3547\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 4\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 4 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist1.0_blur_sigma4.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 216, + "id": "99c14a28", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0033\t0.0033\t0.0025\t0.0033\n", + "weight_shi:\t0.2828\t-1.2986\t-0.7648\t-1.3398\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5759223812272759\n", + "CNMC 1.9848 +- 0.2270 q0: 1.1862 q10: 1.7005 q20: 1.7975 q30: 1.8756 q40: 1.9360 q50: 1.9962 q60: 2.0534 q70: 2.1141 q80: 2.1839 q90: 2.2699 q100: 2.5657\n", + "one_class_1 1.9048 +- 0.2961 q0: 0.9850 q10: 1.4973 q20: 1.6832 q30: 1.7788 q40: 1.8554 q50: 1.9257 q60: 1.9946 q70: 2.0781 q80: 2.1586 q90: 2.2805 q100: 2.6712\n", + "[one_class_1 CSI 0.5759] [one_class_1 best 0.5759] \n", + "[one_class_mean CSI 0.5759] [one_class_mean best 0.5759] \n", + "0.5759\t0.5759\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 3\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist1.0_blur_sigma3.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 221, + "id": "bd3e218a", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0050\t0.0049\t0.0049\t0.0050\n", + "weight_shi:\t0.3094\t-1.0241\t-0.9471\t-0.9535\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5930793556750625\n", + "CNMC 2.0015 +- 0.1074 q0: 1.5164 q10: 1.8705 q20: 1.9260 q30: 1.9607 q40: 1.9870 q50: 2.0109 q60: 2.0342 q70: 2.0588 q80: 2.0868 q90: 2.1264 q100: 2.2720\n", + "one_class_1 1.9465 +- 0.1678 q0: 1.2629 q10: 1.7271 q20: 1.8484 q30: 1.9020 q40: 1.9374 q50: 1.9751 q60: 2.0018 q70: 2.0397 q80: 2.0761 q90: 2.1296 q100: 2.2873\n", + "[one_class_1 CSI 0.5931] [one_class_1 best 0.5931] \n", + "[one_class_mean CSI 0.5931] [one_class_mean best 0.5931] \n", + "0.5931\t0.5931\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.08_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 218, + "id": "c2f0113b", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0151\t0.0114\t0.0140\t0.0143\n", + "weight_shi:\t0.3904\t-1.7955\t-0.8990\t-1.3060\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6319476093539534\n", + "CNMC 2.0984 +- 0.1571 q0: 1.7583 q10: 1.9064 q20: 1.9536 q30: 2.0042 q40: 2.0398 q50: 2.0803 q60: 2.1205 q70: 2.1755 q80: 2.2444 q90: 2.3124 q100: 2.6504\n", + "one_class_1 2.0194 +- 0.1919 q0: 1.5830 q10: 1.7904 q20: 1.8384 q30: 1.8841 q40: 1.9493 q50: 1.9958 q60: 2.0566 q70: 2.1204 q80: 2.1982 q90: 2.2910 q100: 2.6254\n", + "[one_class_1 CSI 0.6319] [one_class_1 best 0.6319] \n", + "[one_class_mean CSI 0.6319] [one_class_mean best 0.6319] \n", + "0.6319\t0.6319\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.5\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 1.5 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist1.0_blur_sigma1.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 219, + "id": "1a64397f", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0835\t0.0834\t0.0843\t0.0839\n", + "weight_shi:\t0.6194\t-4.8322\t-1.4623\t-2.0319\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5156447806844306\n", + "CNMC 2.0671 +- 0.1604 q0: 1.6242 q10: 1.8621 q20: 1.9293 q30: 1.9753 q40: 2.0244 q50: 2.0641 q60: 2.0983 q70: 2.1559 q80: 2.2074 q90: 2.2798 q100: 2.5007\n", + "one_class_1 2.0520 +- 0.2129 q0: 1.5274 q10: 1.7652 q20: 1.8656 q30: 1.9325 q40: 1.9996 q50: 2.0618 q60: 2.1083 q70: 2.1784 q80: 2.2379 q90: 2.3346 q100: 2.6253\n", + "[one_class_1 CSI 0.5156] [one_class_1 best 0.5156] \n", + "[one_class_mean CSI 0.5156] [one_class_mean best 0.5156] \n", + "0.5156\t0.5156\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 1 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist1.0_blur_sigma1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "c1bce058", + "metadata": {}, + "source": [ + "# Color Distortion = 0.5" + ] + }, + { + "cell_type": "markdown", + "id": "65e662af", + "metadata": {}, + "source": [ + "## Examine crop" + ] + }, + { + "cell_type": "code", + "execution_count": 184, + "id": "fdaec3de", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0077\t0.0072\t0.0077\t0.0083\n", + "weight_shi:\t-0.2495\t0.5029\t0.4407\t0.6284\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4545575962892069\n", + "CNMC 1.9531 +- 0.0857 q0: 1.5656 q10: 1.8568 q20: 1.8889 q30: 1.9090 q40: 1.9290 q50: 1.9470 q60: 1.9664 q70: 1.9868 q80: 2.0131 q90: 2.0534 q100: 2.4858\n", + "one_class_1 1.9770 +- 0.1276 q0: 1.5910 q10: 1.8422 q20: 1.8816 q30: 1.9115 q40: 1.9369 q50: 1.9621 q60: 1.9818 q70: 2.0174 q80: 2.0584 q90: 2.1323 q100: 2.7000\n", + "[one_class_1 CSI 0.4546] [one_class_1 best 0.4546] \n", + "[one_class_mean CSI 0.4546] [one_class_mean best 0.4546] \n", + "0.4546\t0.4546\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.5\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.5 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.5_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 185, + "id": "eaa5ec79", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0069\t0.0088\t0.0090\t0.0079\n", + "weight_shi:\t2.7516\t0.9415\t1.1553\t-18.6953\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.580519095798013\n", + "CNMC 2.6811 +- 1.0535 q0: -1.5616 q10: 1.2724 q20: 1.7695 q30: 2.1545 q40: 2.4883 q50: 2.7551 q60: 3.0169 q70: 3.2695 q80: 3.5643 q90: 3.8850 q100: 6.2124\n", + "one_class_1 2.2993 +- 1.4215 q0: -2.7435 q10: 0.4967 q20: 1.2345 q30: 1.7164 q40: 2.0762 q50: 2.3752 q60: 2.6957 q70: 3.0288 q80: 3.4597 q90: 3.9539 q100: 6.3139\n", + "[one_class_1 CSI 0.5805] [one_class_1 best 0.5805] \n", + "[one_class_mean CSI 0.5805] [one_class_mean best 0.5805] \n", + "0.5805\t0.5805\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.3\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.3 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.3_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 220, + "id": "4a75f4d4", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0074\t0.0080\t0.0073\t0.0077\n", + "weight_shi:\t-0.8732\t0.8498\t2.4905\t1.5653\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.2671803947781525\n", + "CNMC 1.8416 +- 0.1772 q0: 1.2265 q10: 1.6388 q20: 1.7083 q30: 1.7532 q40: 1.7915 q50: 1.8242 q60: 1.8580 q70: 1.8990 q80: 1.9656 q90: 2.0775 q100: 2.5900\n", + "one_class_1 2.0431 +- 0.2719 q0: 1.2846 q10: 1.7272 q20: 1.8128 q30: 1.8857 q40: 1.9439 q50: 2.0013 q60: 2.0829 q70: 2.1644 q80: 2.2684 q90: 2.4103 q100: 2.9156\n", + "[one_class_1 CSI 0.2672] [one_class_1 best 0.2672] \n", + "[one_class_mean CSI 0.2672] [one_class_mean best 0.2672] \n", + "0.2672\t0.2672\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.02\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.02 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.02_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 187, + "id": "9d31d62a", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0046\t0.0037\t0.0035\t0.0047\n", + "weight_shi:\t0.4014\t-0.7791\t-0.6536\t-1.3711\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.611233276618155\n", + "CNMC 1.9991 +- 0.2593 q0: 0.9291 q10: 1.6096 q20: 1.7794 q30: 1.9130 q40: 1.9994 q50: 2.0695 q60: 2.1203 q70: 2.1657 q80: 2.2124 q90: 2.2692 q100: 2.5149\n", + "one_class_1 1.8852 +- 0.3136 q0: 0.6811 q10: 1.4317 q20: 1.6563 q30: 1.7768 q40: 1.8804 q50: 1.9566 q60: 2.0179 q70: 2.0924 q80: 2.1459 q90: 2.2152 q100: 2.4864\n", + "[one_class_1 CSI 0.6112] [one_class_1 best 0.6112] \n", + "[one_class_mean CSI 0.6112] [one_class_mean best 0.6112] \n", + "0.6112\t0.6112\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.008\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.008 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.008_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "58a14458", + "metadata": {}, + "source": [ + "## Examine blur_sigma" + ] + }, + { + "cell_type": "code", + "execution_count": 188, + "id": "c7c2318d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0050\t0.0073\t0.0050\t0.0055\n", + "weight_shi:\t-0.3869\t0.3100\t0.7499\t0.9321\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4740262206422994\n", + "CNMC 1.9842 +- 0.2291 q0: 1.4232 q10: 1.6950 q20: 1.7851 q30: 1.8500 q40: 1.9214 q50: 1.9744 q60: 2.0301 q70: 2.0950 q80: 2.1712 q90: 2.2769 q100: 3.0240\n", + "one_class_1 2.0169 +- 0.2738 q0: 1.4504 q10: 1.6924 q20: 1.7765 q30: 1.8481 q40: 1.9251 q50: 1.9917 q60: 2.0673 q70: 2.1457 q80: 2.2342 q90: 2.3550 q100: 3.2798\n", + "[one_class_1 CSI 0.4740] [one_class_1 best 0.4740] \n", + "[one_class_mean CSI 0.4740] [one_class_mean best 0.4740] \n", + "0.4740\t0.4740\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_40.0_resize_factor_0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 189, + "id": "dbd4fb10", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0041\t0.0073\t0.0038\t0.0040\n", + "weight_shi:\t-0.0807\t0.1383\t0.2679\t0.2225\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3159839323874052\n", + "CNMC 1.9782 +- 0.0390 q0: 1.8641 q10: 1.9344 q20: 1.9491 q30: 1.9576 q40: 1.9654 q50: 1.9735 q60: 1.9830 q70: 1.9942 q80: 2.0052 q90: 2.0239 q100: 2.1760\n", + "one_class_1 2.0111 +- 0.0558 q0: 1.8790 q10: 1.9491 q20: 1.9646 q30: 1.9780 q40: 1.9912 q50: 2.0041 q60: 2.0170 q70: 2.0318 q80: 2.0532 q90: 2.0897 q100: 2.2666\n", + "[one_class_1 CSI 0.3160] [one_class_1 best 0.3160] \n", + "[one_class_mean CSI 0.3160] [one_class_mean best 0.3160] \n", + "0.3160\t0.3160\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 20\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 20 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_20.0_resize_factor_0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 190, + "id": "c0cd8374", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0021\t0.0037\t0.0024\t0.0027\n", + "weight_shi:\t0.1478\t4.1795\t-0.4613\t-0.5806\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4508957959874011\n", + "CNMC 2.0731 +- 0.5687 q0: 0.4702 q10: 1.3853 q20: 1.5945 q30: 1.7650 q40: 1.9267 q50: 2.0493 q60: 2.1848 q70: 2.3330 q80: 2.5050 q90: 2.7946 q100: 4.6939\n", + "one_class_1 2.1855 +- 0.7534 q0: 0.3032 q10: 1.1734 q20: 1.4954 q30: 1.7768 q40: 1.9835 q50: 2.1717 q60: 2.4165 q70: 2.5852 q80: 2.8103 q90: 3.1495 q100: 4.4871\n", + "[one_class_1 CSI 0.4509] [one_class_1 best 0.4509] \n", + "[one_class_mean CSI 0.4509] [one_class_mean best 0.4509] \n", + "0.4509\t0.4509\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 6\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 6 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_6.0_resize_factor_0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 191, + "id": "1a733a07", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0019\t0.0025\t0.0018\t0.0018\n", + "weight_shi:\t0.1207\t-0.4216\t-0.2927\t-0.2699\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.622416167876928\n", + "CNMC 2.0481 +- 0.2777 q0: 0.5649 q10: 1.7109 q20: 1.8569 q30: 1.9499 q40: 2.0216 q50: 2.0813 q60: 2.1374 q70: 2.2010 q80: 2.2718 q90: 2.3476 q100: 2.6884\n", + "one_class_1 1.8936 +- 0.3857 q0: 0.4436 q10: 1.4038 q20: 1.6226 q30: 1.7768 q40: 1.8682 q50: 1.9483 q60: 2.0252 q70: 2.1209 q80: 2.2012 q90: 2.3215 q100: 2.8144\n", + "[one_class_1 CSI 0.6224] [one_class_1 best 0.6224] \n", + "[one_class_mean CSI 0.6224] [one_class_mean best 0.6224] \n", + "0.6224\t0.6224\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 4\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 4 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma4.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 192, + "id": "c59e2e1d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0024\t0.0049\t0.0029\t0.0029\n", + "weight_shi:\t0.3727\t0.6016\t-2.1896\t-1.0076\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7071838381996982\n", + "CNMC 2.1791 +- 0.2772 q0: 0.2329 q10: 1.8709 q20: 2.0154 q30: 2.0976 q40: 2.1641 q50: 2.2225 q60: 2.2692 q70: 2.3190 q80: 2.3739 q90: 2.4494 q100: 2.9055\n", + "one_class_1 1.9359 +- 0.4103 q0: -0.1517 q10: 1.4452 q20: 1.7034 q30: 1.8312 q40: 1.9334 q50: 2.0115 q60: 2.0642 q70: 2.1519 q80: 2.2408 q90: 2.3584 q100: 2.8261\n", + "[one_class_1 CSI 0.7072] [one_class_1 best 0.7072] \n", + "[one_class_mean CSI 0.7072] [one_class_mean best 0.7072] \n", + "0.7072\t0.7072\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma3.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 193, + "id": "5827615d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0019\t0.0027\t0.0022\t0.0026\n", + "weight_shi:\t0.1899\t-0.4837\t-0.3535\t-0.3448\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7415028509504856\n", + "CNMC 2.1337 +- 0.1823 q0: 0.9904 q10: 1.9206 q20: 2.0374 q30: 2.0868 q40: 2.1243 q50: 2.1600 q60: 2.1945 q70: 2.2226 q80: 2.2642 q90: 2.3222 q100: 2.5460\n", + "one_class_1 1.9400 +- 0.2874 q0: 0.6323 q10: 1.5817 q20: 1.7870 q30: 1.8866 q40: 1.9416 q50: 1.9843 q60: 2.0207 q70: 2.0829 q80: 2.1566 q90: 2.2468 q100: 2.5500\n", + "[one_class_1 CSI 0.7415] [one_class_1 best 0.7415] \n", + "[one_class_mean CSI 0.7415] [one_class_mean best 0.7415] \n", + "0.7415\t0.7415\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.08_color_dist0.5_one_class_0_7415/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 194, + "id": "65baeab1", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0022\t0.0033\t0.0025\t0.0029\n", + "weight_shi:\t0.4059\t-6.1160\t-2.6702\t-1.5404\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7402292913641013\n", + "CNMC 2.5607 +- 0.6482 q0: -0.8214 q10: 1.7859 q20: 2.1154 q30: 2.3402 q40: 2.5031 q50: 2.6399 q60: 2.7654 q70: 2.9084 q80: 3.0413 q90: 3.2796 q100: 4.2054\n", + "one_class_1 1.8328 +- 0.9715 q0: -2.1220 q10: 0.6263 q20: 1.0844 q30: 1.4277 q40: 1.6691 q50: 1.8643 q60: 2.1102 q70: 2.3723 q80: 2.6504 q90: 3.0382 q100: 4.2076\n", + "[one_class_1 CSI 0.7402] [one_class_1 best 0.7402] \n", + "[one_class_mean CSI 0.7402] [one_class_mean best 0.7402] \n", + "0.7402\t0.7402\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.5\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 1.5 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma1.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 195, + "id": "a9c1c45f", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0077\t0.0092\t0.0076\t0.0081\n", + "weight_shi:\t-0.2259\t0.4304\t0.6270\t0.7623\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.36727255694305183\n", + "CNMC 1.9613 +- 0.2117 q0: 1.4830 q10: 1.7342 q20: 1.7989 q30: 1.8500 q40: 1.8941 q50: 1.9317 q60: 1.9759 q70: 2.0278 q80: 2.0943 q90: 2.2144 q100: 3.2689\n", + "one_class_1 2.0967 +- 0.3131 q0: 1.5468 q10: 1.7976 q20: 1.8621 q30: 1.9190 q40: 1.9608 q50: 2.0266 q60: 2.0831 q70: 2.1520 q80: 2.2654 q90: 2.5291 q100: 3.5407\n", + "[one_class_1 CSI 0.3673] [one_class_1 best 0.3673] \n", + "[one_class_mean CSI 0.3673] [one_class_mean best 0.3673] \n", + "0.3673\t0.3673\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.0\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 1.0 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma1.0_one_class_0/last.model\"" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/.ipynb_checkpoints/train-checkpoint.ipynb b/.ipynb_checkpoints/train-checkpoint.ipynb new file mode 100644 index 0000000..5f16fe9 --- /dev/null +++ b/.ipynb_checkpoints/train-checkpoint.ipynb @@ -0,0 +1,1799 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "c812e9f6", + "metadata": {}, + "outputs": [], 
+ "source": [ + "#!pip3 install --upgrade pip setuptools wheel" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "3c2f5cb0", + "metadata": {}, + "outputs": [], + "source": [ + "!chmod +x eval.py" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9808149e", + "metadata": {}, + "outputs": [], + "source": [ + "#setup\n", + "!git clone https://github.com/NVIDIA/apex\n", + "!cp /home/feoktistovar67431/git/apex/setup.py .\n", + "!pip3 install -v --disable-pip-version-check --no-cache-dir ./\n", + "!pip install git+https://github.com/ildoonet/pytorch-gradual-warmup-lr.git\n", + "!python3 -m pip install torch torchvision scikit-learn tensorboard diffdist==0.1 tensorboardX torchlars==0.1.2 apex" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bf0756e3", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "print(f\"Is CUDA supported by this system? ->{torch.cuda.is_available()}\")\n", + "print(f\"CUDA version: {torch.version.cuda}\")\n", + "cuda_id = torch.cuda.current_device()\n", + "print(f\"ID of current CUDA device: {torch.cuda.current_device()}\")\n", + "print(f\"Number of available devices: {torch.cuda.device_count()}\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f7ff35c", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "#TEST ONLY\n", + "#!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 '/home/feoktistovar67431/CSI/CSI/train.py' --dataset 'cifar10' --model 'resnet18' --mode simclr_CSI --shift_trans_type rotation --epochs 10 --batch_size 32 --optimizer sgd --one_class_idx 9" + ] + }, + { + "cell_type": "markdown", + "id": "e3f0081b", + "metadata": {}, + "source": [ + "# Combined shiftings" + ] + }, + { + "cell_type": "code", + "execution_count": 222, + "id": "26921f38", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + 
"/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/distributed/launch.py:186: FutureWarning: The module torch.distributed.launch is deprecated\n", + "and will be removed in future. Use torchrun.\n", + "Note that --use_env is set by default in torchrun.\n", + "If your script expects `--local_rank` argument to be set, please\n", + "change it to read from `os.environ['LOCAL_RANK']` instead. See \n", + "https://pytorch.org/docs/stable/distributed.html#launch-utility for \n", + "further instructions\n", + "\n", + " FutureWarning,\n", + "WARNING:torch.distributed.run:\n", + "*****************************************\n", + "Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. \n", + "*****************************************\n", + "Warning: using Python fallback for SyncBatchNorm, possibly because apex was installed without --cuda_ext. The exception raised when attempting to import the cuda backend was: No module named 'syncbn'\n", + "Warning: using Python fallback for SyncBatchNorm, possibly because apex was installed without --cuda_ext. The exception raised when attempting to import the cuda backend was: No module named 'syncbn'\n", + "Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten.\n", + "Warning: apex was installed without --cpp_ext. 
Falling back to Python flatten and unflatten.\n", + "[2022-04-27 21:19:03.912343] Namespace(K_shift=4, batch_size=8, blur_sigma=40.0, color_distort=0.5, dataset='CNMC', distortion_scale=0.8, epochs=10, error_step=5, image_size=(300, 300, 3), load_path=None, local_rank=0, lr_init=0.1, lr_scheduler='cosine', mode='simclr_CSI', model='resnet18_imagenet', multi_gpu=True, n_classes=2, n_gpus=2, n_superclasses=2, no_strict=False, noise_mean=0, noise_std=0.3, one_class_idx=1, ood_batch_size=100, ood_dataset=[0], ood_layer='simclr', ood_samples=1, ood_score=['norm_mean'], optimizer='sgd', print_score=False, proc_step=None, res='450px', resize_factor=0.08, resize_fix=False, resume_path=None, save_score=False, save_step=10, sharpness_factor=2, shift_trans=BlurRandpers(\n", + " (gauss): GaussBlur()\n", + " (randpers): RandPers()\n", + "), shift_trans_type='blur_randpers', sim_lambda=1.0, simclr_dim=128, suffix=None, temperature=0.5, test_batch_size=100, warmup=10, weight_decay=1e-06)\n", + "[2022-04-27 21:19:03.912780] DistributedDataParallel(\n", + " (module): ResNet(\n", + " (linear): Linear(in_features=512, out_features=2, bias=True)\n", + " (simclr_layer): Sequential(\n", + " (0): Linear(in_features=512, out_features=512, bias=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=512, out_features=128, bias=True)\n", + " )\n", + " (shift_cls_layer): Linear(in_features=512, out_features=4, bias=True)\n", + " (joint_distribution_layer): Linear(in_features=512, out_features=8, bias=True)\n", + " (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n", + " (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n", + " (layer1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): 
SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer3): Sequential(\n", + " 
(0): BasicBlock(\n", + " (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer4): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): 
SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n", + " (normalize): NormalizeLayer()\n", + " )\n", + ")\n", + "Epoch 1 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/optim/lr_scheduler.py:154: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n", + " warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n", + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/optim/lr_scheduler.py:154: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. 
Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n", + " warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2022-04-27 21:19:06.681133] [Epoch 1; 0] [Time 1.753] [Data 0.128] [LR 0.10000]\n", + "[LossC 0.000000] [LossSim 4.795710] [LossShift 1.446792]\n", + "[2022-04-27 21:19:26.588634] [Epoch 1; 50] [Time 0.435] [Data 0.827] [LR 0.11004]\n", + "[LossC 0.000000] [LossSim 4.458384] [LossShift 1.450558]\n", + "[2022-04-27 21:19:47.065503] [Epoch 1; 100] [Time 0.441] [Data 0.818] [LR 0.12009]\n", + "[LossC 0.000000] [LossSim 4.495318] [LossShift 0.887940]\n", + "[2022-04-27 21:20:08.001796] [Epoch 1; 150] [Time 0.451] [Data 0.826] [LR 0.13013]\n", + "[LossC 0.000000] [LossSim 4.466498] [LossShift 1.651758]\n", + "[2022-04-27 21:20:29.557696] [Epoch 1; 200] [Time 0.463] [Data 0.859] [LR 0.14018]\n", + "[LossC 0.000000] [LossSim 4.488340] [LossShift 0.890679]\n", + "[2022-04-27 21:20:51.522911] [Epoch 1; 250] [Time 0.465] [Data 0.987] [LR 0.15022]\n", + "[LossC 0.000000] [LossSim 4.457443] [LossShift 1.463503]\n", + "[2022-04-27 21:21:13.774301] [Epoch 1; 300] [Time 0.481] [Data 0.873] [LR 0.16027]\n", + "[LossC 0.000000] [LossSim 4.408203] [LossShift 0.978724]\n", + "[2022-04-27 21:21:36.139558] [Epoch 1; 350] [Time 0.463] [Data 0.896] [LR 0.17031]\n", + "[LossC 0.000000] [LossSim 4.406531] [LossShift 0.853714]\n", + "[2022-04-27 21:21:58.598135] [Epoch 1; 400] [Time 0.469] [Data 0.870] [LR 0.18036]\n", + "[LossC 0.000000] [LossSim 4.494049] [LossShift 0.970959]\n", + "[2022-04-27 21:22:19.114742] [DONE] [Time 0.471] [Data 0.868] [LossC 0.000000] [LossSim 4.517576] [LossShift 1.226323]\n", + "Epoch 2 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:22:20.199138] [Epoch 2; 0] [Time 0.502] [Data 0.158] [LR 0.19000]\n", + "[LossC 
0.000000] [LossSim 4.359697] [LossShift 0.896302]\n", + "[2022-04-27 21:22:42.722677] [Epoch 2; 50] [Time 0.452] [Data 0.869] [LR 0.20004]\n", + "[LossC 0.000000] [LossSim 4.424041] [LossShift 0.848778]\n", + "[2022-04-27 21:23:05.591518] [Epoch 2; 100] [Time 0.452] [Data 0.867] [LR 0.21009]\n", + "[LossC 0.000000] [LossSim 4.309733] [LossShift 0.864205]\n", + "[2022-04-27 21:23:28.092864] [Epoch 2; 150] [Time 0.471] [Data 0.871] [LR 0.22013]\n", + "[LossC 0.000000] [LossSim 4.339020] [LossShift 0.861768]\n", + "[2022-04-27 21:23:51.151448] [Epoch 2; 200] [Time 0.471] [Data 0.982] [LR 0.23018]\n", + "[LossC 0.000000] [LossSim 4.398156] [LossShift 0.844045]\n", + "[2022-04-27 21:24:13.759556] [Epoch 2; 250] [Time 0.474] [Data 0.873] [LR 0.24022]\n", + "[LossC 0.000000] [LossSim 4.331997] [LossShift 0.895239]\n", + "[2022-04-27 21:24:36.498251] [Epoch 2; 300] [Time 0.557] [Data 0.844] [LR 0.25027]\n", + "[LossC 0.000000] [LossSim 4.314375] [LossShift 0.844688]\n", + "[2022-04-27 21:24:59.086448] [Epoch 2; 350] [Time 0.448] [Data 0.855] [LR 0.26031]\n", + "[LossC 0.000000] [LossSim 4.494950] [LossShift 0.842451]\n", + "[2022-04-27 21:25:22.358179] [Epoch 2; 400] [Time 0.509] [Data 0.884] [LR 0.27036]\n", + "[LossC 0.000000] [LossSim 4.366556] [LossShift 0.884501]\n", + "[2022-04-27 21:25:43.075378] [DONE] [Time 0.487] [Data 0.907] [LossC 0.000000] [LossSim 4.395404] [LossShift 0.913691]\n", + "Epoch 3 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:25:44.090938] [Epoch 3; 0] [Time 0.461] [Data 0.134] [LR 0.28000]\n", + "[LossC 0.000000] [LossSim 4.363524] [LossShift 0.843010]\n", + "[2022-04-27 21:26:06.906782] [Epoch 3; 50] [Time 0.489] [Data 0.855] [LR 0.29004]\n", + "[LossC 0.000000] [LossSim 4.475645] [LossShift 1.142160]\n", + "[2022-04-27 21:26:30.509720] [Epoch 3; 100] [Time 0.454] [Data 0.893] [LR 0.30009]\n", + "[LossC 0.000000] [LossSim 4.336016] [LossShift 
0.952089]\n", + "[2022-04-27 21:26:53.002780] [Epoch 3; 150] [Time 0.477] [Data 0.860] [LR 0.31013]\n", + "[LossC 0.000000] [LossSim 4.475717] [LossShift 0.875115]\n", + "[2022-04-27 21:27:15.597338] [Epoch 3; 200] [Time 0.471] [Data 0.857] [LR 0.32018]\n", + "[LossC 0.000000] [LossSim 4.349196] [LossShift 0.872518]\n", + "[2022-04-27 21:27:38.345896] [Epoch 3; 250] [Time 0.463] [Data 0.877] [LR 0.33022]\n", + "[LossC 0.000000] [LossSim 4.353239] [LossShift 0.881434]\n", + "[2022-04-27 21:28:01.311768] [Epoch 3; 300] [Time 0.476] [Data 0.876] [LR 0.34027]\n", + "[LossC 0.000000] [LossSim 4.418363] [LossShift 0.876285]\n", + "[2022-04-27 21:28:24.109063] [Epoch 3; 350] [Time 0.529] [Data 0.860] [LR 0.35031]\n", + "[LossC 0.000000] [LossSim 4.391089] [LossShift 0.891998]\n", + "[2022-04-27 21:28:46.767573] [Epoch 3; 400] [Time 0.490] [Data 0.923] [LR 0.36036]\n", + "[LossC 0.000000] [LossSim 4.366334] [LossShift 0.961224]\n", + "[2022-04-27 21:29:07.659288] [DONE] [Time 0.485] [Data 0.909] [LossC 0.000000] [LossSim 4.379301] [LossShift 0.903935]\n", + "Epoch 4 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:29:08.649924] [Epoch 4; 0] [Time 0.441] [Data 0.154] [LR 0.37000]\n", + "[LossC 0.000000] [LossSim 4.468335] [LossShift 0.975977]\n", + "[2022-04-27 21:29:31.468727] [Epoch 4; 50] [Time 0.459] [Data 0.911] [LR 0.38004]\n", + "[LossC 0.000000] [LossSim 4.803634] [LossShift 2.258877]\n", + "[2022-04-27 21:29:53.609175] [Epoch 4; 100] [Time 0.471] [Data 0.855] [LR 0.39009]\n", + "[LossC 0.000000] [LossSim 4.457827] [LossShift 0.855588]\n", + "[2022-04-27 21:30:16.236645] [Epoch 4; 150] [Time 0.472] [Data 0.861] [LR 0.40013]\n", + "[LossC 0.000000] [LossSim 4.359911] [LossShift 0.869267]\n", + "[2022-04-27 21:30:38.965445] [Epoch 4; 200] [Time 0.457] [Data 0.922] [LR 0.41018]\n", + "[LossC 0.000000] [LossSim 4.300039] [LossShift 0.853143]\n", + "[2022-04-27 21:31:01.744464] 
[Epoch 4; 250] [Time 0.464] [Data 0.847] [LR 0.42022]\n", + "[LossC 0.000000] [LossSim 4.343868] [LossShift 0.904560]\n", + "[2022-04-27 21:31:24.138632] [Epoch 4; 300] [Time 0.468] [Data 0.929] [LR 0.43027]\n", + "[LossC 0.000000] [LossSim 4.440177] [LossShift 1.008291]\n", + "[2022-04-27 21:31:47.197617] [Epoch 4; 350] [Time 0.459] [Data 0.988] [LR 0.44031]\n", + "[LossC 0.000000] [LossSim 4.313808] [LossShift 0.843529]\n", + "[2022-04-27 21:32:10.020673] [Epoch 4; 400] [Time 0.464] [Data 0.915] [LR 0.45036]\n", + "[LossC 0.000000] [LossSim 4.347077] [LossShift 0.842586]\n", + "[2022-04-27 21:32:30.667648] [DONE] [Time 0.484] [Data 0.903] [LossC 0.000000] [LossSim 4.378773] [LossShift 0.932685]\n", + "Epoch 5 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:32:31.676676] [Epoch 5; 0] [Time 0.472] [Data 0.141] [LR 0.46000]\n", + "[LossC 0.000000] [LossSim 4.296750] [LossShift 0.850581]\n", + "[2022-04-27 21:32:54.231546] [Epoch 5; 50] [Time 0.531] [Data 0.852] [LR 0.47004]\n", + "[LossC 0.000000] [LossSim 4.324140] [LossShift 0.856480]\n", + "[2022-04-27 21:33:16.815921] [Epoch 5; 100] [Time 0.554] [Data 0.887] [LR 0.48009]\n", + "[LossC 0.000000] [LossSim 4.298337] [LossShift 0.911719]\n", + "[2022-04-27 21:33:39.742560] [Epoch 5; 150] [Time 0.513] [Data 0.938] [LR 0.49013]\n", + "[LossC 0.000000] [LossSim 4.311210] [LossShift 0.854077]\n", + "[2022-04-27 21:34:02.227222] [Epoch 5; 200] [Time 0.544] [Data 0.883] [LR 0.50018]\n", + "[LossC 0.000000] [LossSim 4.316729] [LossShift 0.873590]\n", + "[2022-04-27 21:34:25.029707] [Epoch 5; 250] [Time 0.595] [Data 0.907] [LR 0.51022]\n", + "[LossC 0.000000] [LossSim 4.332903] [LossShift 0.852887]\n", + "[2022-04-27 21:34:47.734705] [Epoch 5; 300] [Time 0.457] [Data 0.884] [LR 0.52027]\n", + "[LossC 0.000000] [LossSim 4.326703] [LossShift 0.827790]\n", + "[2022-04-27 21:35:10.065878] [Epoch 5; 350] [Time 0.480] [Data 0.848] [LR 
0.53031]\n", + "[LossC 0.000000] [LossSim 4.629390] [LossShift 0.972859]\n", + "[2022-04-27 21:35:32.496680] [Epoch 5; 400] [Time 0.471] [Data 0.945] [LR 0.54036]\n", + "[LossC 0.000000] [LossSim 4.476654] [LossShift 0.924936]\n", + "[2022-04-27 21:35:53.353584] [DONE] [Time 0.484] [Data 0.901] [LossC 0.000000] [LossSim 4.361738] [LossShift 0.904301]\n", + "Epoch 6 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:35:54.394370] [Epoch 6; 0] [Time 0.459] [Data 0.168] [LR 0.55000]\n", + "[LossC 0.000000] [LossSim 4.356859] [LossShift 0.916392]\n", + "[2022-04-27 21:36:16.884891] [Epoch 6; 50] [Time 0.461] [Data 0.861] [LR 0.56004]\n", + "[LossC 0.000000] [LossSim 4.396854] [LossShift 0.942714]\n", + "[2022-04-27 21:36:39.738454] [Epoch 6; 100] [Time 0.460] [Data 0.898] [LR 0.57009]\n", + "[LossC 0.000000] [LossSim 4.463193] [LossShift 0.884684]\n", + "[2022-04-27 21:37:02.620539] [Epoch 6; 150] [Time 0.467] [Data 0.885] [LR 0.58013]\n", + "[LossC 0.000000] [LossSim 4.373494] [LossShift 0.972907]\n", + "[2022-04-27 21:37:26.181037] [Epoch 6; 200] [Time 0.469] [Data 0.986] [LR 0.59018]\n", + "[LossC 0.000000] [LossSim 4.492169] [LossShift 0.874383]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2022-04-27 21:37:48.941984] [Epoch 6; 250] [Time 0.455] [Data 0.864] [LR 0.60022]\n", + "[LossC 0.000000] [LossSim 4.365623] [LossShift 0.879145]\n", + "[2022-04-27 21:38:11.891998] [Epoch 6; 300] [Time 0.472] [Data 1.195] [LR 0.61027]\n", + "[LossC 0.000000] [LossSim 4.348284] [LossShift 1.021375]\n", + "[2022-04-27 21:38:34.705143] [Epoch 6; 350] [Time 0.536] [Data 0.864] [LR 0.62031]\n", + "[LossC 0.000000] [LossSim 4.290128] [LossShift 0.857135]\n", + "[2022-04-27 21:38:57.461264] [Epoch 6; 400] [Time 0.467] [Data 0.956] [LR 0.63036]\n", + "[LossC 0.000000] [LossSim 4.288968] [LossShift 0.835112]\n", + "[2022-04-27 21:39:18.226831] [DONE] [Time 0.491] 
[Data 0.911] [LossC 0.000000] [LossSim 4.369289] [LossShift 0.965370]\n", + "Epoch 7 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:39:19.197901] [Epoch 7; 0] [Time 0.448] [Data 0.145] [LR 0.64000]\n", + "[LossC 0.000000] [LossSim 4.337277] [LossShift 0.845977]\n", + "[2022-04-27 21:39:41.903147] [Epoch 7; 50] [Time 0.516] [Data 0.844] [LR 0.65004]\n", + "[LossC 0.000000] [LossSim 4.348597] [LossShift 0.887782]\n", + "[2022-04-27 21:40:04.761686] [Epoch 7; 100] [Time 0.462] [Data 0.904] [LR 0.66009]\n", + "[LossC 0.000000] [LossSim 4.288217] [LossShift 0.847829]\n", + "[2022-04-27 21:40:27.497629] [Epoch 7; 150] [Time 0.505] [Data 0.909] [LR 0.67013]\n", + "[LossC 0.000000] [LossSim 4.574395] [LossShift 0.856589]\n", + "[2022-04-27 21:40:50.169432] [Epoch 7; 200] [Time 0.503] [Data 0.874] [LR 0.68018]\n", + "[LossC 0.000000] [LossSim 4.347064] [LossShift 1.008280]\n", + "[2022-04-27 21:41:13.461267] [Epoch 7; 250] [Time 0.535] [Data 0.876] [LR 0.69022]\n", + "[LossC 0.000000] [LossSim 4.344507] [LossShift 0.942077]\n", + "[2022-04-27 21:41:36.295103] [Epoch 7; 300] [Time 0.481] [Data 0.856] [LR 0.70027]\n", + "[LossC 0.000000] [LossSim 4.309855] [LossShift 0.832647]\n", + "[2022-04-27 21:41:58.827571] [Epoch 7; 350] [Time 0.464] [Data 0.853] [LR 0.71031]\n", + "[LossC 0.000000] [LossSim 4.432234] [LossShift 1.124480]\n", + "[2022-04-27 21:42:21.525643] [Epoch 7; 400] [Time 0.462] [Data 0.971] [LR 0.72036]\n", + "[LossC 0.000000] [LossSim 4.344445] [LossShift 0.938462]\n", + "[2022-04-27 21:42:42.184827] [DONE] [Time 0.488] [Data 0.907] [LossC 0.000000] [LossSim 4.358003] [LossShift 0.918527]\n", + "Epoch 8 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:42:43.188401] [Epoch 8; 0] [Time 0.472] [Data 0.151] [LR 0.73000]\n", + "[LossC 0.000000] [LossSim 4.423952] [LossShift 0.940491]\n", 
+ "[2022-04-27 21:43:05.626867] [Epoch 8; 50] [Time 0.609] [Data 0.911] [LR 0.74004]\n", + "[LossC 0.000000] [LossSim 4.442121] [LossShift 0.870375]\n", + "[2022-04-27 21:43:28.441870] [Epoch 8; 100] [Time 0.480] [Data 0.858] [LR 0.75009]\n", + "[LossC 0.000000] [LossSim 4.287797] [LossShift 0.879039]\n", + "[2022-04-27 21:43:51.203855] [Epoch 8; 150] [Time 0.464] [Data 1.064] [LR 0.76013]\n", + "[LossC 0.000000] [LossSim 4.277451] [LossShift 0.845034]\n", + "[2022-04-27 21:44:13.634754] [Epoch 8; 200] [Time 0.568] [Data 0.851] [LR 0.77018]\n", + "[LossC 0.000000] [LossSim 4.329644] [LossShift 0.961596]\n", + "[2022-04-27 21:44:36.887687] [Epoch 8; 250] [Time 0.723] [Data 0.942] [LR 0.78022]\n", + "[LossC 0.000000] [LossSim 4.317680] [LossShift 0.864846]\n", + "[2022-04-27 21:44:59.265520] [Epoch 8; 300] [Time 0.450] [Data 0.856] [LR 0.79027]\n", + "[LossC 0.000000] [LossSim 4.362687] [LossShift 0.917989]\n", + "[2022-04-27 21:45:22.337561] [Epoch 8; 350] [Time 0.480] [Data 0.891] [LR 0.80031]\n", + "[LossC 0.000000] [LossSim 4.263648] [LossShift 0.859828]\n", + "[2022-04-27 21:45:45.275990] [Epoch 8; 400] [Time 0.497] [Data 0.868] [LR 0.81036]\n", + "[LossC 0.000000] [LossSim 4.380607] [LossShift 0.836404]\n", + "[2022-04-27 21:46:06.499931] [DONE] [Time 0.488] [Data 0.908] [LossC 0.000000] [LossSim 4.348544] [LossShift 0.891716]\n", + "Epoch 9 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:46:07.537821] [Epoch 9; 0] [Time 0.464] [Data 0.159] [LR 0.82000]\n", + "[LossC 0.000000] [LossSim 4.373352] [LossShift 0.876816]\n", + "[2022-04-27 21:46:30.396968] [Epoch 9; 50] [Time 0.455] [Data 0.856] [LR 0.83004]\n", + "[LossC 0.000000] [LossSim 4.306937] [LossShift 0.909936]\n", + "[2022-04-27 21:46:53.286257] [Epoch 9; 100] [Time 0.451] [Data 0.855] [LR 0.84009]\n", + "[LossC 0.000000] [LossSim 4.355694] [LossShift 1.014931]\n", + "[2022-04-27 21:47:16.173773] [Epoch 9; 150] 
[Time 0.465] [Data 1.050] [LR 0.85013]\n", + "[LossC 0.000000] [LossSim 4.293055] [LossShift 0.837927]\n", + "[2022-04-27 21:47:38.465545] [Epoch 9; 200] [Time 0.465] [Data 0.872] [LR 0.86018]\n", + "[LossC 0.000000] [LossSim 4.365509] [LossShift 0.908220]\n", + "[2022-04-27 21:48:01.092709] [Epoch 9; 250] [Time 0.461] [Data 0.937] [LR 0.87022]\n", + "[LossC 0.000000] [LossSim 4.350402] [LossShift 0.842791]\n", + "[2022-04-27 21:48:24.019747] [Epoch 9; 300] [Time 0.472] [Data 0.906] [LR 0.88027]\n", + "[LossC 0.000000] [LossSim 4.499863] [LossShift 1.153011]\n", + "[2022-04-27 21:48:46.872260] [Epoch 9; 350] [Time 0.477] [Data 0.890] [LR 0.89031]\n", + "[LossC 0.000000] [LossSim 4.301045] [LossShift 0.840660]\n", + "[2022-04-27 21:49:09.507846] [Epoch 9; 400] [Time 0.447] [Data 0.851] [LR 0.90036]\n", + "[LossC 0.000000] [LossSim 4.358407] [LossShift 0.889107]\n", + "[2022-04-27 21:49:30.079116] [DONE] [Time 0.485] [Data 0.905] [LossC 0.000000] [LossSim 4.353526] [LossShift 0.893255]\n", + "Epoch 10 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:49:31.077872] [Epoch 10; 0] [Time 0.455] [Data 0.157] [LR 0.91000]\n", + "[LossC 0.000000] [LossSim 4.342908] [LossShift 0.914479]\n", + "[2022-04-27 21:49:53.899316] [Epoch 10; 50] [Time 0.466] [Data 0.991] [LR 0.92004]\n", + "[LossC 0.000000] [LossSim 4.321300] [LossShift 0.815638]\n", + "[2022-04-27 21:50:16.668189] [Epoch 10; 100] [Time 0.497] [Data 0.877] [LR 0.93009]\n", + "[LossC 0.000000] [LossSim 4.261489] [LossShift 0.859249]\n", + "[2022-04-27 21:50:39.620289] [Epoch 10; 150] [Time 0.585] [Data 0.871] [LR 0.94013]\n", + "[LossC 0.000000] [LossSim 4.288896] [LossShift 0.847932]\n", + "[2022-04-27 21:51:02.703581] [Epoch 10; 200] [Time 0.472] [Data 0.893] [LR 0.95018]\n", + "[LossC 0.000000] [LossSim 4.321000] [LossShift 0.911242]\n", + "[2022-04-27 21:51:25.530056] [Epoch 10; 250] [Time 0.460] [Data 0.888] [LR 0.96022]\n", 
+ "[LossC 0.000000] [LossSim 4.281656] [LossShift 0.857911]\n", + "[2022-04-27 21:51:48.577854] [Epoch 10; 300] [Time 0.594] [Data 0.853] [LR 0.97027]\n", + "[LossC 0.000000] [LossSim 4.266364] [LossShift 0.833280]\n", + "[2022-04-27 21:52:11.521917] [Epoch 10; 350] [Time 0.470] [Data 0.921] [LR 0.98031]\n", + "[LossC 0.000000] [LossSim 4.421701] [LossShift 0.852391]\n", + "[2022-04-27 21:52:34.254971] [Epoch 10; 400] [Time 0.472] [Data 1.054] [LR 0.99036]\n", + "[LossC 0.000000] [LossSim 4.423033] [LossShift 0.933093]\n", + "[2022-04-27 21:52:55.124955] [DONE] [Time 0.491] [Data 0.912] [LossC 0.000000] [LossSim 4.332921] [LossShift 0.889218]\n" + ] + } + ], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --blur_sigma 40 --distortion_scale 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur_randpers --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ec34e63", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/distributed/launch.py:186: FutureWarning: The module torch.distributed.launch is deprecated\n", + "and will be removed in future. Use torchrun.\n", + "Note that --use_env is set by default in torchrun.\n", + "If your script expects `--local_rank` argument to be set, please\n", + "change it to read from `os.environ['LOCAL_RANK']` instead. 
See \n", + "https://pytorch.org/docs/stable/distributed.html#launch-utility for \n", + "further instructions\n", + "\n", + " FutureWarning,\n", + "WARNING:torch.distributed.run:\n", + "*****************************************\n", + "Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. \n", + "*****************************************\n", + "Warning: using Python fallback for SyncBatchNorm, possibly because apex was installed without --cuda_ext. The exception raised when attempting to import the cuda backend was: No module named 'syncbn'\n", + "Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten.\n", + "Warning: using Python fallback for SyncBatchNorm, possibly because apex was installed without --cuda_ext. The exception raised when attempting to import the cuda backend was: No module named 'syncbn'\n", + "Warning: apex was installed without --cpp_ext. 
Falling back to Python flatten and unflatten.\n", + "[2022-04-27 21:53:02.070110] Namespace(K_shift=4, batch_size=8, blur_sigma=40.0, color_distort=0.5, dataset='CNMC', distortion_scale=0.6, epochs=10, error_step=5, image_size=(300, 300, 3), load_path=None, local_rank=0, lr_init=0.1, lr_scheduler='cosine', mode='simclr_CSI', model='resnet18_imagenet', multi_gpu=True, n_classes=2, n_gpus=2, n_superclasses=2, no_strict=False, noise_mean=0, noise_std=0.3, one_class_idx=1, ood_batch_size=100, ood_dataset=[0], ood_layer='simclr', ood_samples=1, ood_score=['norm_mean'], optimizer='sgd', print_score=False, proc_step=None, res='450px', resize_factor=0.08, resize_fix=False, resume_path=None, save_score=False, save_step=10, sharpness_factor=128.0, shift_trans=BlurSharpness(\n", + " (gauss): GaussBlur()\n", + " (sharp): RandomAdjustSharpness()\n", + "), shift_trans_type='blur_sharp', sim_lambda=1.0, simclr_dim=128, suffix=None, temperature=0.5, test_batch_size=100, warmup=10, weight_decay=1e-06)\n", + "[2022-04-27 21:53:02.070601] DistributedDataParallel(\n", + " (module): ResNet(\n", + " (linear): Linear(in_features=512, out_features=2, bias=True)\n", + " (simclr_layer): Sequential(\n", + " (0): Linear(in_features=512, out_features=512, bias=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=512, out_features=128, bias=True)\n", + " )\n", + " (shift_cls_layer): Linear(in_features=512, out_features=4, bias=True)\n", + " (joint_distribution_layer): Linear(in_features=512, out_features=8, bias=True)\n", + " (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n", + " (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n", + " (layer1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " 
(bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer3): Sequential(\n", 
+ " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer4): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): 
SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n", + " (normalize): NormalizeLayer()\n", + " )\n", + ")\n", + "Epoch 1 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/optim/lr_scheduler.py:154: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n", + " warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/optim/lr_scheduler.py:154: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. 
Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n", + " warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n", + "[2022-04-27 21:53:04.749961] [Epoch 1; 0] [Time 1.525] [Data 0.149] [LR 0.10000]\n", + "[LossC 0.000000] [LossSim 4.858340] [LossShift 1.407876]\n", + "[2022-04-27 21:53:25.624987] [Epoch 1; 50] [Time 0.458] [Data 0.878] [LR 0.11004]\n", + "[LossC 0.000000] [LossSim 4.845747] [LossShift 1.667100]\n", + "[2022-04-27 21:53:47.668063] [Epoch 1; 100] [Time 0.474] [Data 0.893] [LR 0.12009]\n", + "[LossC 0.000000] [LossSim 4.844110] [LossShift 1.436306]\n", + "[2022-04-27 21:54:10.188214] [Epoch 1; 150] [Time 0.454] [Data 0.867] [LR 0.13013]\n", + "[LossC 0.000000] [LossSim 4.843646] [LossShift 1.547756]\n", + "[2022-04-27 21:54:33.381892] [Epoch 1; 200] [Time 0.517] [Data 0.932] [LR 0.14018]\n", + "[LossC 0.000000] [LossSim 4.738900] [LossShift 1.359678]\n", + "[2022-04-27 21:54:56.617839] [Epoch 1; 250] [Time 0.469] [Data 1.055] [LR 0.15022]\n", + "[LossC 0.000000] [LossSim 4.796278] [LossShift 1.271640]\n", + "[2022-04-27 21:55:19.371901] [Epoch 1; 300] [Time 0.469] [Data 0.898] [LR 0.16027]\n", + "[LossC 0.000000] [LossSim 4.608876] [LossShift 1.552633]\n", + "[2022-04-27 21:55:42.571197] [Epoch 1; 350] [Time 0.516] [Data 0.918] [LR 0.17031]\n", + "[LossC 0.000000] [LossSim 4.842148] [LossShift 1.336090]\n", + "[2022-04-27 21:56:05.642156] [Epoch 1; 400] [Time 0.523] [Data 0.867] [LR 0.18036]\n", + "[LossC 0.000000] [LossSim 4.832942] [LossShift 1.156906]\n", + "[2022-04-27 21:56:26.681201] [DONE] [Time 0.489] [Data 0.909] [LossC 0.000000] [LossSim 4.770748] [LossShift 1.591873]\n", + "Epoch 2 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:56:27.693232] [Epoch 2; 0] [Time 0.440] [Data 0.148] [LR 0.19000]\n", + "[LossC 0.000000] [LossSim 4.602440] [LossShift 1.091861]\n", + "[2022-04-27 
21:56:50.382773] [Epoch 2; 50] [Time 0.515] [Data 0.877] [LR 0.20004]\n", + "[LossC 0.000000] [LossSim 4.600789] [LossShift 1.042183]\n", + "[2022-04-27 21:57:13.401066] [Epoch 2; 100] [Time 0.472] [Data 0.977] [LR 0.21009]\n", + "[LossC 0.000000] [LossSim 4.711175] [LossShift 1.322048]\n", + "[2022-04-27 21:57:36.339250] [Epoch 2; 150] [Time 0.608] [Data 0.852] [LR 0.22013]\n", + "[LossC 0.000000] [LossSim 4.559575] [LossShift 1.136288]\n", + "[2022-04-27 21:57:59.495503] [Epoch 2; 200] [Time 0.467] [Data 1.097] [LR 0.23018]\n", + "[LossC 0.000000] [LossSim 4.471087] [LossShift 1.055894]\n", + "[2022-04-27 21:58:22.207180] [Epoch 2; 250] [Time 0.498] [Data 0.879] [LR 0.24022]\n", + "[LossC 0.000000] [LossSim 4.526820] [LossShift 0.970052]\n", + "[2022-04-27 21:58:45.158632] [Epoch 2; 300] [Time 0.468] [Data 1.074] [LR 0.25027]\n", + "[LossC 0.000000] [LossSim 4.660821] [LossShift 1.274141]\n", + "[2022-04-27 21:59:08.291492] [Epoch 2; 350] [Time 0.482] [Data 0.860] [LR 0.26031]\n", + "[LossC 0.000000] [LossSim 4.487653] [LossShift 0.929607]\n", + "[2022-04-27 21:59:31.435978] [Epoch 2; 400] [Time 0.469] [Data 1.006] [LR 0.27036]\n", + "[LossC 0.000000] [LossSim 4.729589] [LossShift 1.065959]\n", + "[2022-04-27 21:59:52.467171] [DONE] [Time 0.494] [Data 0.915] [LossC 0.000000] [LossSim 4.540043] [LossShift 1.051491]\n", + "Epoch 3 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:59:53.543037] [Epoch 3; 0] [Time 0.515] [Data 0.131] [LR 0.28000]\n", + "[LossC 0.000000] [LossSim 4.606118] [LossShift 1.089750]\n", + "[2022-04-27 22:00:16.551717] [Epoch 3; 50] [Time 0.454] [Data 0.864] [LR 0.29004]\n", + "[LossC 0.000000] [LossSim 4.470480] [LossShift 1.156890]\n", + "[2022-04-27 22:00:39.247741] [Epoch 3; 100] [Time 0.463] [Data 0.960] [LR 0.30009]\n", + "[LossC 0.000000] [LossSim 4.465283] [LossShift 1.034453]\n", + "[2022-04-27 22:01:02.437289] [Epoch 3; 150] [Time 0.485] [Data 
0.857] [LR 0.31013]\n", + "[LossC 0.000000] [LossSim 4.579294] [LossShift 1.223945]\n", + "[2022-04-27 22:01:25.646166] [Epoch 3; 200] [Time 0.458] [Data 0.864] [LR 0.32018]\n", + "[LossC 0.000000] [LossSim 4.475991] [LossShift 0.937372]\n", + "[2022-04-27 22:01:48.449946] [Epoch 3; 250] [Time 0.472] [Data 0.846] [LR 0.33022]\n", + "[LossC 0.000000] [LossSim 4.492799] [LossShift 1.123910]\n", + "[2022-04-27 22:02:11.088044] [Epoch 3; 300] [Time 0.584] [Data 0.884] [LR 0.34027]\n", + "[LossC 0.000000] [LossSim 4.520730] [LossShift 1.016755]\n", + "[2022-04-27 22:02:34.026722] [Epoch 3; 350] [Time 0.462] [Data 0.904] [LR 0.35031]\n", + "[LossC 0.000000] [LossSim 4.588828] [LossShift 1.008489]\n", + "[2022-04-27 22:02:57.093785] [Epoch 3; 400] [Time 0.468] [Data 1.008] [LR 0.36036]\n", + "[LossC 0.000000] [LossSim 4.431605] [LossShift 0.948913]\n", + "[2022-04-27 22:03:18.112107] [DONE] [Time 0.493] [Data 0.914] [LossC 0.000000] [LossSim 4.458634] [LossShift 1.007948]\n", + "Epoch 4 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 22:03:19.173064] [Epoch 4; 0] [Time 0.486] [Data 0.144] [LR 0.37000]\n", + "[LossC 0.000000] [LossSim 4.522823] [LossShift 0.872640]\n", + "[2022-04-27 22:03:41.681406] [Epoch 4; 50] [Time 0.515] [Data 0.965] [LR 0.38004]\n", + "[LossC 0.000000] [LossSim 4.627268] [LossShift 1.079998]\n", + "[2022-04-27 22:04:04.353249] [Epoch 4; 100] [Time 0.456] [Data 0.890] [LR 0.39009]\n", + "[LossC 0.000000] [LossSim 4.401687] [LossShift 1.002750]\n", + "[2022-04-27 22:04:27.711134] [Epoch 4; 150] [Time 0.474] [Data 0.937] [LR 0.40013]\n", + "[LossC 0.000000] [LossSim 4.423962] [LossShift 0.875453]\n", + "[2022-04-27 22:04:50.564132] [Epoch 4; 200] [Time 0.535] [Data 0.917] [LR 0.41018]\n", + "[LossC 0.000000] [LossSim 4.401275] [LossShift 0.953443]\n", + "[2022-04-27 22:05:13.697441] [Epoch 4; 250] [Time 0.459] [Data 0.858] [LR 0.42022]\n", + "[LossC 0.000000] [LossSim 
4.430320] [LossShift 0.948798]\n", + "[2022-04-27 22:05:36.625607] [Epoch 4; 300] [Time 0.475] [Data 0.875] [LR 0.43027]\n", + "[LossC 0.000000] [LossSim 4.321131] [LossShift 0.913674]\n", + "[2022-04-27 22:05:59.610157] [Epoch 4; 350] [Time 0.462] [Data 0.924] [LR 0.44031]\n", + "[LossC 0.000000] [LossSim 4.468315] [LossShift 0.879398]\n", + "[2022-04-27 22:06:22.584148] [Epoch 4; 400] [Time 0.462] [Data 0.924] [LR 0.45036]\n", + "[LossC 0.000000] [LossSim 4.320601] [LossShift 0.835482]\n", + "[2022-04-27 22:06:43.326378] [DONE] [Time 0.492] [Data 0.912] [LossC 0.000000] [LossSim 4.410098] [LossShift 0.938872]\n", + "Epoch 5 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 22:06:44.342767] [Epoch 5; 0] [Time 0.473] [Data 0.149] [LR 0.46000]\n", + "[LossC 0.000000] [LossSim 4.376451] [LossShift 0.939952]\n", + "[2022-04-27 22:07:06.782078] [Epoch 5; 50] [Time 0.449] [Data 0.856] [LR 0.47004]\n", + "[LossC 0.000000] [LossSim 4.396927] [LossShift 0.920150]\n", + "[2022-04-27 22:07:29.728200] [Epoch 5; 100] [Time 0.463] [Data 0.908] [LR 0.48009]\n", + "[LossC 0.000000] [LossSim 4.447166] [LossShift 0.918573]\n", + "[2022-04-27 22:07:52.322851] [Epoch 5; 150] [Time 0.473] [Data 1.023] [LR 0.49013]\n", + "[LossC 0.000000] [LossSim 4.367201] [LossShift 0.944386]\n", + "[2022-04-27 22:08:15.084181] [Epoch 5; 200] [Time 0.466] [Data 0.909] [LR 0.50018]\n", + "[LossC 0.000000] [LossSim 4.325580] [LossShift 0.883697]\n", + "[2022-04-27 22:08:37.787865] [Epoch 5; 250] [Time 0.521] [Data 0.937] [LR 0.51022]\n", + "[LossC 0.000000] [LossSim 4.426981] [LossShift 0.855859]\n", + "[2022-04-27 22:09:00.704213] [Epoch 5; 300] [Time 0.467] [Data 0.885] [LR 0.52027]\n", + "[LossC 0.000000] [LossSim 4.355620] [LossShift 0.837514]\n", + "[2022-04-27 22:09:23.448209] [Epoch 5; 350] [Time 0.482] [Data 0.899] [LR 0.53031]\n", + "[LossC 0.000000] [LossSim 4.432379] [LossShift 0.906252]\n", + "[2022-04-27 
22:09:46.070029] [Epoch 5; 400] [Time 0.542] [Data 0.907] [LR 0.54036]\n", + "[LossC 0.000000] [LossSim 4.362264] [LossShift 0.886713]\n", + "[2022-04-27 22:10:06.772650] [DONE] [Time 0.486] [Data 0.904] [LossC 0.000000] [LossSim 4.392308] [LossShift 0.915971]\n", + "Epoch 6 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 22:10:07.752875] [Epoch 6; 0] [Time 0.446] [Data 0.148] [LR 0.55000]\n", + "[LossC 0.000000] [LossSim 4.358101] [LossShift 0.934794]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2022-04-27 22:10:30.582189] [Epoch 6; 50] [Time 0.484] [Data 0.911] [LR 0.56004]\n", + "[LossC 0.000000] [LossSim 4.426515] [LossShift 0.982254]\n", + "[2022-04-27 22:10:53.219031] [Epoch 6; 100] [Time 0.596] [Data 0.861] [LR 0.57009]\n", + "[LossC 0.000000] [LossSim 4.355786] [LossShift 0.859021]\n", + "[2022-04-27 22:11:16.124596] [Epoch 6; 150] [Time 0.591] [Data 0.880] [LR 0.58013]\n", + "[LossC 0.000000] [LossSim 4.331424] [LossShift 0.872154]\n", + "[2022-04-27 22:11:38.965621] [Epoch 6; 200] [Time 0.449] [Data 0.886] [LR 0.59018]\n", + "[LossC 0.000000] [LossSim 4.351139] [LossShift 0.876345]\n", + "[2022-04-27 22:12:01.754661] [Epoch 6; 250] [Time 0.461] [Data 0.920] [LR 0.60022]\n", + "[LossC 0.000000] [LossSim 4.491778] [LossShift 1.031505]\n", + "[2022-04-27 22:12:24.410563] [Epoch 6; 300] [Time 0.467] [Data 0.890] [LR 0.61027]\n", + "[LossC 0.000000] [LossSim 4.340865] [LossShift 0.851271]\n", + "[2022-04-27 22:12:47.216964] [Epoch 6; 350] [Time 0.467] [Data 0.897] [LR 0.62031]\n", + "[LossC 0.000000] [LossSim 4.372048] [LossShift 0.921748]\n", + "[2022-04-27 22:13:09.822383] [Epoch 6; 400] [Time 0.469] [Data 0.935] [LR 0.63036]\n", + "[LossC 0.000000] [LossSim 4.349135] [LossShift 0.854723]\n", + "[2022-04-27 22:13:30.781444] [DONE] [Time 0.487] [Data 0.907] [LossC 0.000000] [LossSim 4.368142] [LossShift 0.896633]\n", + "Epoch 7 
(logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 22:13:31.766230] [Epoch 7; 0] [Time 0.455] [Data 0.133] [LR 0.64000]\n", + "[LossC 0.000000] [LossSim 4.423601] [LossShift 0.868863]\n", + "[2022-04-27 22:13:54.496806] [Epoch 7; 50] [Time 0.463] [Data 0.904] [LR 0.65004]\n", + "[LossC 0.000000] [LossSim 4.383883] [LossShift 0.905446]\n", + "[2022-04-27 22:14:17.511831] [Epoch 7; 100] [Time 0.470] [Data 1.031] [LR 0.66009]\n", + "[LossC 0.000000] [LossSim 4.296111] [LossShift 0.895986]\n", + "[2022-04-27 22:14:40.280189] [Epoch 7; 150] [Time 0.477] [Data 0.871] [LR 0.67013]\n", + "[LossC 0.000000] [LossSim 4.305459] [LossShift 0.909102]\n", + "[2022-04-27 22:15:03.937648] [Epoch 7; 200] [Time 0.513] [Data 1.929] [LR 0.68018]\n", + "[LossC 0.000000] [LossSim 4.345171] [LossShift 0.866567]\n", + "[2022-04-27 22:15:26.668402] [Epoch 7; 250] [Time 0.594] [Data 0.859] [LR 0.69022]\n", + "[LossC 0.000000] [LossSim 4.381218] [LossShift 0.895947]\n", + "[2022-04-27 22:15:49.487447] [Epoch 7; 300] [Time 0.473] [Data 0.861] [LR 0.70027]\n", + "[LossC 0.000000] [LossSim 4.351787] [LossShift 0.836976]\n", + "[2022-04-27 22:16:12.051757] [Epoch 7; 350] [Time 0.466] [Data 1.045] [LR 0.71031]\n", + "[LossC 0.000000] [LossSim 4.400456] [LossShift 0.845599]\n", + "[2022-04-27 22:16:34.818097] [Epoch 7; 400] [Time 0.468] [Data 0.849] [LR 0.72036]\n", + "[LossC 0.000000] [LossSim 4.433661] [LossShift 1.035500]\n", + "[2022-04-27 22:16:56.032426] [DONE] [Time 0.491] [Data 0.912] [LossC 0.000000] [LossSim 4.370436] [LossShift 0.907309]\n", + "Epoch 8 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 22:16:57.048328] [Epoch 8; 0] [Time 0.470] [Data 0.160] [LR 0.73000]\n", + "[LossC 0.000000] [LossSim 4.345762] [LossShift 0.854992]\n" + ] + } + ], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + 
"# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 128\n", + "# blur_sigma : 40\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --blur_sigma 40 --sharpness_factor 128 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur_sharp --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cb3bca71", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers_sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 128\n", + "# randpers : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 128 --distortion_scale 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers_sharp --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "baf0eff6", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers_sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 128\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --blur_sigma 40 --sharpness_factor 128 --distortion_scale 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur_randpers_sharp --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "markdown", + "id": 
"30642f7c", + "metadata": {}, + "source": [ + "# Rotation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d3be9f07", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : rotation\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type rotation --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "markdown", + "id": "d5b3adfc", + "metadata": {}, + "source": [ + "# Cutperm" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2a006f7", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : rotation\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type cutperm --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "markdown", + "id": "dff09fe7", + "metadata": {}, + "source": [ + "# Adjust Sharpness" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "695ed30c", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 4096\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 4096 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 
'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 10 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3537b825", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 2048\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 2048 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a6495274", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 1024\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 1024 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f9a0fe8", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 512\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 512 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 
--batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44688e2b", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 256\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 256 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e97c21fe", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 150\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 150 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ecf758b", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 140\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 140 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "id": "0d9767a5", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 130\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 130 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bd662097", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 128\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 128 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7c01b6f", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 120\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 120 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d129e42", + "metadata": {}, + "outputs": 
[], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 100\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 100 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d70d2983", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 80\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 80 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6b32d416", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 64\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 64 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf996327", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# 
id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 32\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 32 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4d841ffb", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 16\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 16 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fd929ab1", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 8 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e1d33ea1", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# 
color_dist : 0.5\n", + "# sharp : 5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 5 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c1fd73c", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 4\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 4 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9395e2f2", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 3\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 3 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "959cc49f", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 2\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m 
torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 2 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "markdown", + "id": "76fd693e", + "metadata": {}, + "source": [ + "# Random Perspective" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6dfe547", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.95\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.95 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 10 --batch_size 8 --optimizer sgd --one_class_idx 0 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ccc4b932", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.9\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.9 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4148f1e6", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + 
"# color_dist : 0.5\n", + "# randper_dist: 0.85\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.85 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "022d5ce0", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.8 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2bec00e6", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.75\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.75 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1875267e", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 
0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.6\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.6 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a02ed7ec", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.3\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.3 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "markdown", + "id": "d599ef3f", + "metadata": {}, + "source": [ + "## Examine crop" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7195ad51", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.5\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.5 --res 450px --blur_sigma 2 --color_distort 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 10 --batch_size 8 --optimizer sgd --one_class_idx 0 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7401d0e7", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", 
+ "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.3\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.3 --res 450px --blur_sigma 2 --color_distort 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 0 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b88a2670", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.02\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.02 --res 450px --blur_sigma 2 --color_distort 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 0 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83922b52", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.008\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.008 --res 450px --blur_sigma 2 --color_distort 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 0 " + ] + }, + { + "cell_type": "markdown", + "id": "006079f3", + "metadata": {}, + "source": [ + "## Examine blur_sigma" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b65d654", + "metadata": {}, + "outputs": [], + "source": [ + "# 
TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 180\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 180 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 10 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8aa50f84", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 120\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 120 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f94522c3", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 110\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 110 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8bd4c63a", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : 
CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 105\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 105 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cade09f1", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 100\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 100 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0f1af3f1", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 95\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 95 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5b5e043", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + 
"# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 90\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 90 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4c30628", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 80\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 80 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13a022fc", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 60\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 60 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "02779f69", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 
100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 40 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b63a705a", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 20\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 20 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dde3e377", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 6\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 6 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c23c0e0a", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# 
crop : 0.08\n", + "# blur_sigma : 4\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 4 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35fbd79f", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 3 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42510921", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 2 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7672da24", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.5\n", + 
"# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 1.5 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e94687e", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 1 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/README.md b/README.md new file mode 100644 index 0000000..6f4c0b6 --- /dev/null +++ b/README.md @@ -0,0 +1,176 @@ +# CSI: Novelty Detection via Contrastive Learning on Distributionally Shifted Instances + +Official PyTorch implementation of +["**CSI: Novelty Detection via Contrastive Learning on Distributionally Shifted Instances**"]( +https://arxiv.org/abs/2007.08176) (NeurIPS 2020) by +[Jihoon Tack*](https://jihoontack.github.io), +[Sangwoo Mo*](https://sites.google.com/view/sangwoomo), +[Jongheon Jeong](https://sites.google.com/view/jongheonj), +and 
[Jinwoo Shin](http://alinlab.kaist.ac.kr/shin.html). + +

+ +

+ +## 1. Requirements +### Environments +Currently, requires following packages +- python 3.6+ +- torch 1.4+ +- torchvision 0.5+ +- CUDA 10.1+ +- scikit-learn 0.22+ +- tensorboard 2.0+ +- [torchlars](https://github.com/kakaobrain/torchlars) == 0.1.2 +- [pytorch-gradual-warmup-lr](https://github.com/ildoonet/pytorch-gradual-warmup-lr) packages +- [apex](https://github.com/NVIDIA/apex) == 0.1 +- [diffdist](https://github.com/ag14774/diffdist) == 0.1 + +### Datasets +For CIFAR, please download the following datasets to `~/data`. +* [LSUN_resize](https://www.dropbox.com/s/moqh2wh8696c3yl/LSUN_resize.tar.gz), +[ImageNet_resize](https://www.dropbox.com/s/kp3my3412u5k9rl/Imagenet_resize.tar.gz) +* [LSUN_fix](https://drive.google.com/file/d/1KVWj9xpHfVwGcErH5huVujk9snhEGOxE/view?usp=sharing), +[ImageNet_fix](https://drive.google.com/file/d/1sO_-noq10mmziB1ECDyNhD5T4u5otyKA/view?usp=sharing) + +For ImageNet-30, please download the following datasets to `~/data`. +* [ImageNet-30-train](https://drive.google.com/file/d/1B5c39Fc3haOPzlehzmpTLz6xLtGyKEy4/view), +[ImageNet-30-test](https://drive.google.com/file/d/13xzVuQMEhSnBRZr-YaaO08coLU2dxAUq/view) +* [CUB-200](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html), +[Stanford Dogs](http://vision.stanford.edu/aditya86/ImageNetDogs/), +[Oxford Pets](https://www.robots.ox.ac.uk/~vgg/data/pets/), +[Oxford flowers](https://www.robots.ox.ac.uk/~vgg/data/flowers/), +[Food-101](https://www.kaggle.com/dansbecker/food-101), +[Places-365](http://data.csail.mit.edu/places/places365/val_256.tar), +[Caltech-256](https://www.kaggle.com/jessicali9530/caltech256), +[DTD](https://www.robots.ox.ac.uk/~vgg/data/dtd/) + +For Food-101, remove hotdog class to avoid overlap. + +## 2. Training +Currently, all code examples are assuming distributed launch with 4 multi GPUs. +To run the code with single GPU, remove `-m torch.distributed.launch --nproc_per_node=4`. 
+ +### Unlabeled one-class & multi-class +To train unlabeled one-class & multi-class models in the paper, run this command: + +```train +CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 train.py --dataset --model --mode simclr_CSI --shift_trans_type rotation --batch_size 32 --one_class_idx +``` + +> Option --one_class_idx denotes the in-distribution of one-class training. +> For multi-class training, set --one_class_idx as None. +> To run SimCLR simply change --mode to simclr. +> Total batch size should be 512 = 4 (GPU) * 32 (--batch_size option) * 4 (cardinality of shifted transformation set). + +### Labeled multi-class +To train labeled multi-class model (confidence calibrated classifier) in the paper, run this command: + +```train +# Representation train +CUDA_VISIBLE_DEVICES=0,1,2,3 python -m torch.distributed.launch --nproc_per_node=4 train.py --dataset --model --mode sup_simclr_CSI --shift_trans_type rotation --batch_size 32 --epoch 700 +# Linear layer train +python train.py --mode sup_CSI_linear --dataset --model --batch_size 32 --epoch 100 --shift_trans_type rotation --load_path +``` + +> To run SupCLR simply change --mode to sup_simclr, sup_linear for representation training and linear layer training respectively. +> Total batch size should be same as above. Currently only supports rotation for shifted transformation. + +## 3. Evaluation + +We provide the checkpoint of the CSI pre-trained model. 
Download the checkpoint from the following link: +- One-class CIFAR-10: [ResNet-18](https://drive.google.com/drive/folders/1z02i0G_lzrZe0NwpH-tnjpO8pYHV7mE9?usp=sharing) +- Unlabeled (multi-class) CIFAR-10: [ResNet-18](https://drive.google.com/file/d/1yUq6Si6hWaMa1uYyLDHk0A4BrPIa8ECV/view?usp=sharing) +- Unlabeled (multi-class) ImageNet-30: [ResNet-18](https://drive.google.com/file/d/1KucQWSik8RyoJgU-fz8XLmCWhvMOP7fT/view?usp=sharing) +- Labeled (multi-class) CIFAR-10: [ResNet-18](https://drive.google.com/file/d/1rW2-0MJEzPHLb_PAW-LvCivHt-TkDpRO/view?usp=sharing) + +### Unlabeled one-class & multi-class +To evaluate my model on unlabeled one-class & multi-class out-of-distribution (OOD) detection setting, run this command: + +```eval +python eval.py --mode ood_pre --dataset --model --ood_score CSI --shift_trans_type rotation --print_score --ood_samples 10 --resize_factor 0.54 --resize_fix --one_class_idx --load_path +``` + +> Option --one_class_idx denotes the in-distribution of one-class evaluation. +> For multi-class evaluation, set --one_class_idx as None. +> The resize_factor & resize fix option fix the cropping size of RandomResizedCrop(). +> For SimCLR evaluation, change --ood_score to simclr. + +### Labeled multi-class +To evaluate my model on labeled multi-class accuracy, ECE, OOD detection setting, run this command: + +```eval +# OOD AUROC +python eval.py --mode ood --ood_score baseline_marginalized --print_score --dataset --model --shift_trans_type rotation --load_path +# Accuray & ECE +python eval.py --mode test_marginalized_acc --dataset --model --shift_trans_type rotation --load_path +``` + +> This option is for marginalized inference. +> For single inference (also used for SupCLR) change --ood_score baseline in first command, +> and --mode test_acc in second command. + +## 4. 
Results + +Our model achieves the following performance on: + +### One-Class Out-of-Distribution Detection + +| Method | Dataset | AUROC (Mean) | +| --------------|------------------ | --------------| +| SimCLR | CIFAR-10-OC | 87.9% | +| Rot+Trans | CIFAR-10-OC | 90.0% | +| CSI (ours) | CIFAR-10-OC | 94.3% | + +We only show CIFAR-10 one-class result in this repo. For other setting, please see our paper. + +### Unlabeled Multi-Class Out-of-Distribution Detection + +| Method | Dataset | OOD Dataset | AUROC (Mean) | +| --------------|------------------ |---------------|--------------| +| Rot+Trans | CIFAR-10 | CIFAR-100 | 82.5% | +| CSI (ours) | CIFAR-10 | CIFAR-100 | 89.3% | + +We only show CIFAR-10 to CIFAR-100 OOD detection result in this repo. For other OOD dataset results, see our paper. + +### Labeled Multi-Class Result + +| Method | Dataset | OOD Dataset | Acc | ECE | AUROC (Mean) | +| ---------------- |------------------ |---------------|-------|-------|--------------| +| SupCLR | CIFAR-10 | CIFAR-100 | 93.9% | 5.54% | 88.3% | +| CSI (ours) | CIFAR-10 | CIFAR-100 | 94.8% | 4.24% | 90.6% | +| CSI-ensem (ours) | CIFAR-10 | CIFAR-100 | 96.0% | 3.64% | 92.3% | + +We only show CIFAR-10 with CIFAR-100 as OOD in this repo. For other dataset results, please see our paper. + +## 5. New OOD dataset + +

+ +

+ +We find that current benchmark datasets for OOD detection, are visually far from in-distribution datasets (e.g. CIFAR). + +To address this issue, we provide new datasets for OOD detection evaluation: +[LSUN_fix](https://drive.google.com/file/d/1KVWj9xpHfVwGcErH5huVujk9snhEGOxE/view?usp=sharing), +[ImageNet_fix](https://drive.google.com/file/d/1sO_-noq10mmziB1ECDyNhD5T4u5otyKA/view?usp=sharing). +See the above figure for the visualization of current benchmark and our dataset. + +To generate OOD datasets, run the following codes inside the `./datasets` folder: + +```OOD dataset generation +# ImageNet FIX generation code +python imagenet_fix_preprocess.py +# LSUN FIX generation code +python lsun_fix_preprocess.py +``` + +## Citation +``` +@inproceedings{tack2020csi, + title={CSI: Novelty Detection via Contrastive Learning on Distributionally Shifted Instances}, + author={Jihoon Tack and Sangwoo Mo and Jongheon Jeong and Jinwoo Shin}, + booktitle={Advances in Neural Information Processing Systems}, + year={2020} +} +``` diff --git a/common/LARS.py b/common/LARS.py new file mode 100644 index 0000000..7595f88 --- /dev/null +++ b/common/LARS.py @@ -0,0 +1,119 @@ +""" +References: + - https://github.com/PyTorchLightning/PyTorch-Lightning-Bolts/blob/master/pl_bolts/optimizers/lars_scheduling.py + - https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + - https://arxiv.org/pdf/1708.03888.pdf + - https://github.com/noahgolmant/pytorch-lars/blob/master/lars.py +""" + +import torch +from .wrapper import OptimWrapper + +# from torchlars._adaptive_lr import compute_adaptive_lr # Impossible to build extensions + + +__all__ = ["LARS"] + + +class LARS(OptimWrapper): + """Implements 'LARS (Layer-wise Adaptive Rate Scaling)'__ as Optimizer a + :class:`~torch.optim.Optimizer` wrapper. + __ : https://arxiv.org/abs/1708.03888 + Wraps an arbitrary optimizer like :class:`torch.optim.SGD` to use LARS. 
If + you want to the same performance obtained with small-batch training when + you use large-batch training, LARS will be helpful:: + Args: + optimizer (Optimizer): + optimizer to wrap + eps (float, optional): + epsilon to help with numerical stability while calculating the + adaptive learning rate + trust_coef (float, optional): + trust coefficient for calculating the adaptive learning rate + Example:: + base_optimizer = optim.SGD(model.parameters(), lr=0.1) + optimizer = LARS(optimizer=base_optimizer) + output = model(input) + loss = loss_fn(output, target) + loss.backward() + optimizer.step() + """ + + def __init__(self, optimizer, trust_coef=0.02, clip=True, eps=1e-8): + if eps < 0.0: + raise ValueError("invalid epsilon value: , %f" % eps) + if trust_coef < 0.0: + raise ValueError("invalid trust coefficient: %f" % trust_coef) + + self.optim = optimizer + self.eps = eps + self.trust_coef = trust_coef + self.clip = clip + + def __getstate__(self): + self.optim.__get + lars_dict = {} + lars_dict["trust_coef"] = self.trust_coef + lars_dict["clip"] = self.clip + lars_dict["eps"] = self.eps + return (self.optim, lars_dict) + + def __setstate__(self, state): + self.optim, lars_dict = state + self.trust_coef = lars_dict["trust_coef"] + self.clip = lars_dict["clip"] + self.eps = lars_dict["eps"] + + @torch.no_grad() + def step(self, closure=None): + weight_decays = [] + + for group in self.optim.param_groups: + weight_decay = group.get("weight_decay", 0) + weight_decays.append(weight_decay) + + # reset weight decay + group["weight_decay"] = 0 + + # update the parameters + for p in group["params"]: + if p.grad is not None: + self.update_p(p, group, weight_decay) + + # update the optimizer + self.optim.step(closure=closure) + + # return weight decay control to optimizer + for group_idx, group in enumerate(self.optim.param_groups): + group["weight_decay"] = weight_decays[group_idx] + + def update_p(self, p, group, weight_decay): + # calculate new norms + p_norm = 
torch.norm(p.data) + g_norm = torch.norm(p.grad.data) + + if p_norm != 0 and g_norm != 0: + # calculate new lr + divisor = g_norm + p_norm * weight_decay + self.eps + adaptive_lr = (self.trust_coef * p_norm) / divisor + + # clip lr + if self.clip: + adaptive_lr = min(adaptive_lr / group["lr"], 1) + + # update params with clipped lr + p.grad.data += weight_decay * p.data + p.grad.data *= adaptive_lr + + +from torch.optim import SGD +from pylot.util import delegates, separate_kwargs + + +class SGDLARS(LARS): + @delegates(to=LARS.__init__) + @delegates(to=SGD.__init__, keep=True, but=["eps", "trust_coef"]) + def __init__(self, params, **kwargs): + sgd_kwargs, lars_kwargs = separate_kwargs(kwargs, SGD.__init__) + optim = SGD(params, **sgd_kwargs) + super().__init__(optim, **lars_kwargs) \ No newline at end of file diff --git a/common/__init__.py b/common/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/common/__init__.pyc b/common/__init__.pyc new file mode 100644 index 0000000..eb9ad90 Binary files /dev/null and b/common/__init__.pyc differ diff --git a/common/__pycache__/LARS.cpython-37.pyc b/common/__pycache__/LARS.cpython-37.pyc new file mode 100644 index 0000000..895f1be Binary files /dev/null and b/common/__pycache__/LARS.cpython-37.pyc differ diff --git a/common/__pycache__/__init__.cpython-36.pyc b/common/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..55586ac Binary files /dev/null and b/common/__pycache__/__init__.cpython-36.pyc differ diff --git a/common/__pycache__/__init__.cpython-37.pyc b/common/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..3cc4c42 Binary files /dev/null and b/common/__pycache__/__init__.cpython-37.pyc differ diff --git a/common/__pycache__/common.cpython-36.pyc b/common/__pycache__/common.cpython-36.pyc new file mode 100644 index 0000000..0ab3d65 Binary files /dev/null and b/common/__pycache__/common.cpython-36.pyc differ diff --git 
a/common/__pycache__/common.cpython-37.pyc b/common/__pycache__/common.cpython-37.pyc new file mode 100644 index 0000000..ea303d4 Binary files /dev/null and b/common/__pycache__/common.cpython-37.pyc differ diff --git a/common/__pycache__/eval.cpython-36.pyc b/common/__pycache__/eval.cpython-36.pyc new file mode 100644 index 0000000..1503604 Binary files /dev/null and b/common/__pycache__/eval.cpython-36.pyc differ diff --git a/common/__pycache__/eval.cpython-37.pyc b/common/__pycache__/eval.cpython-37.pyc new file mode 100644 index 0000000..3c1d6bc Binary files /dev/null and b/common/__pycache__/eval.cpython-37.pyc differ diff --git a/common/__pycache__/eval.cpython-37.pyc.2498080381488 b/common/__pycache__/eval.cpython-37.pyc.2498080381488 new file mode 100644 index 0000000..e69de29 diff --git a/common/__pycache__/eval.cpython-37.pyc.2731703741232 b/common/__pycache__/eval.cpython-37.pyc.2731703741232 new file mode 100644 index 0000000..e69de29 diff --git a/common/__pycache__/train.cpython-36.pyc b/common/__pycache__/train.cpython-36.pyc new file mode 100644 index 0000000..ae23ee7 Binary files /dev/null and b/common/__pycache__/train.cpython-36.pyc differ diff --git a/common/__pycache__/train.cpython-37.pyc b/common/__pycache__/train.cpython-37.pyc new file mode 100644 index 0000000..5682f53 Binary files /dev/null and b/common/__pycache__/train.cpython-37.pyc differ diff --git a/common/common.py b/common/common.py new file mode 100644 index 0000000..1e1ab98 --- /dev/null +++ b/common/common.py @@ -0,0 +1,114 @@ +from argparse import ArgumentParser + + +def parse_args(default=False): + """Command-line argument parser for training.""" + + parser = ArgumentParser(description='Pytorch implementation of CSI') + + parser.add_argument('--dataset', help='Dataset', + choices=['cifar10', 'cifar100', 'imagenet', 'CNMC', 'CNMC_grayscale'], type=str) + parser.add_argument('--one_class_idx', help='None: multi-class, Not None: one-class', + default=None, type=int) + 
parser.add_argument('--model', help='Model', + choices=['resnet18', 'resnet18_imagenet'], type=str) + parser.add_argument('--mode', help='Training mode', + default='simclr', type=str) + parser.add_argument('--simclr_dim', help='Dimension of simclr layer', + default=128, type=int) + + parser.add_argument('--shift_trans_type', help='shifting transformation type', default='none', + choices=['rotation', 'cutperm', 'blur', 'randpers', 'sharp', 'blur_randpers', + 'blur_sharp', 'randpers_sharp', 'blur_randpers_sharp', 'noise', 'none'], type=str) + + parser.add_argument("--local_rank", type=int, + default=0, help='Local rank for distributed learning') + parser.add_argument('--resume_path', help='Path to the resume checkpoint', + default=None, type=str) + parser.add_argument('--load_path', help='Path to the loading checkpoint', + default=None, type=str) + parser.add_argument("--no_strict", help='Do not strictly load state_dicts', + action='store_true') + parser.add_argument('--suffix', help='Suffix for the log dir', + default=None, type=str) + parser.add_argument('--error_step', help='Epoch steps to compute errors', + default=5, type=int) + parser.add_argument('--save_step', help='Epoch steps to save models', + default=10, type=int) + + ##### Training Configurations ##### + parser.add_argument('--epochs', help='Epochs', + default=1000, type=int) + parser.add_argument('--optimizer', help='Optimizer', + choices=['sgd', 'lars'], + default='lars', type=str) + parser.add_argument('--lr_scheduler', help='Learning rate scheduler', + choices=['step_decay', 'cosine'], + default='cosine', type=str) + parser.add_argument('--warmup', help='Warm-up epochs', + default=10, type=int) + parser.add_argument('--lr_init', help='Initial learning rate', + default=1e-1, type=float) + parser.add_argument('--weight_decay', help='Weight decay', + default=1e-6, type=float) + parser.add_argument('--batch_size', help='Batch size', + default=128, type=int) + parser.add_argument('--test_batch_size', 
help='Batch size for test loader', + default=100, type=int) + + parser.add_argument('--blur_sigma', help='Distortion grade', + default=2.0, type=float) + parser.add_argument('--color_distort', help='Color distortion grade', + default=0.5, type=float) + parser.add_argument('--distortion_scale', help='Perspective distortion grade', + default=0.6, type=float) + parser.add_argument('--sharpness_factor', help='Sharpening or blurring factor of image. ' + 'Can be any non negative number. 0 gives a blurred image, ' + '1 gives the original image while 2 increases the sharpness ' + 'by a factor of 2.', + default=2, type=float) + parser.add_argument('--noise_mean', help='mean', + default=0, type=float) + parser.add_argument('--noise_std', help='std', + default=0.3, type=float) + + + ##### Objective Configurations ##### + parser.add_argument('--sim_lambda', help='Weight for SimCLR loss', + default=1.0, type=float) + parser.add_argument('--temperature', help='Temperature for similarity', + default=0.5, type=float) + + ##### Evaluation Configurations ##### + parser.add_argument("--ood_dataset", help='Datasets for OOD detection', + default=None, nargs="*", type=str) + parser.add_argument("--ood_score", help='score function for OOD detection', + default=['norm_mean'], nargs="+", type=str) + parser.add_argument("--ood_layer", help='layer for OOD scores', + choices=['penultimate', 'simclr', 'shift'], + default=['simclr', 'shift'], nargs="+", type=str) + parser.add_argument("--ood_samples", help='number of samples to compute OOD score', + default=1, type=int) + parser.add_argument("--ood_batch_size", help='batch size to compute OOD score', + default=100, type=int) + parser.add_argument("--resize_factor", help='resize scale is sampled from [resize_factor, 1.0]', + default=0.08, type=float) + parser.add_argument("--resize_fix", help='resize scale is fixed to resize_factor (not (resize_factor, 1.0])', + action='store_true') + + parser.add_argument("--print_score", help='print quantiles 
of ood score', + action='store_true') + parser.add_argument("--save_score", help='save ood score for plotting histogram', + action='store_true') + + ##### Process configuration option ##### + parser.add_argument("--proc_step", help='choose process to initiate.', + choices=['eval', 'train'], + default=None, type=str) + parser.add_argument("--res", help='resolution of dataset', + default="32px", type=str) + + if default: + return parser.parse_args('') # empty string + else: + return parser.parse_args() diff --git a/common/eval.py b/common/eval.py new file mode 100644 index 0000000..03482ee --- /dev/null +++ b/common/eval.py @@ -0,0 +1,81 @@ +from copy import deepcopy + +import torch +import torch.nn as nn +from torch.utils.data import DataLoader + +from common.common import parse_args +import models.classifier as C +from datasets import get_dataset, get_superclass_list, get_subclass_dataset + +P = parse_args() + +### Set torch device ### + +P.n_gpus = torch.cuda.device_count() +assert P.n_gpus <= 1 # no multi GPU +P.multi_gpu = False + +if torch.cuda.is_available(): + torch.cuda.set_device(P.local_rank) +device = torch.device(f"cuda" if torch.cuda.is_available() else "cpu") + +### Initialize dataset ### +ood_eval = P.mode == 'ood_pre' +if P.dataset == 'imagenet' and ood_eval or P.dataset == 'CNMC' and ood_eval or P.dataset == 'CNMC_grayscale' and ood_eval: + P.batch_size = 1 + P.test_batch_size = 1 +train_set, test_set, image_size, n_classes = get_dataset(P, dataset=P.dataset, eval=ood_eval) + +P.image_size = image_size +P.n_classes = n_classes + +if P.one_class_idx is not None: + cls_list = get_superclass_list(P.dataset) + P.n_superclasses = len(cls_list) + + full_test_set = deepcopy(test_set) # test set of full classes + train_set = get_subclass_dataset(train_set, classes=cls_list[P.one_class_idx]) + test_set = get_subclass_dataset(test_set, classes=cls_list[P.one_class_idx]) + +kwargs = {'pin_memory': False, 'num_workers': 2} + +train_loader = 
DataLoader(train_set, shuffle=True, batch_size=P.batch_size, **kwargs) +test_loader = DataLoader(test_set, shuffle=False, batch_size=P.test_batch_size, **kwargs) + +if P.ood_dataset is None: + if P.one_class_idx is not None: + P.ood_dataset = list(range(P.n_superclasses)) + P.ood_dataset.pop(P.one_class_idx) + elif P.dataset == 'cifar10': + P.ood_dataset = ['svhn', 'lsun_resize', 'imagenet_resize', 'lsun_fix', 'imagenet_fix', 'cifar100', 'interp'] + elif P.dataset == 'imagenet': + P.ood_dataset = ['cub', 'stanford_dogs', 'flowers102', 'places365', 'food_101', 'caltech_256', 'dtd', 'pets'] + +ood_test_loader = dict() +for ood in P.ood_dataset: + if ood == 'interp': + ood_test_loader[ood] = None # dummy loader + continue + + if P.one_class_idx is not None: + ood_test_set = get_subclass_dataset(full_test_set, classes=cls_list[ood]) + ood = f'one_class_{ood}' # change save name + else: + ood_test_set = get_dataset(P, dataset=ood, test_only=True, image_size=P.image_size, eval=ood_eval) + + ood_test_loader[ood] = DataLoader(ood_test_set, shuffle=False, batch_size=P.test_batch_size, **kwargs) + +### Initialize model ### + +simclr_aug = C.get_simclr_augmentation(P, image_size=P.image_size).to(device) +P.shift_trans, P.K_shift = C.get_shift_module(P, eval=True) +P.shift_trans = P.shift_trans.to(device) + +model = C.get_classifier(P.model, n_classes=P.n_classes).to(device) +model = C.get_shift_classifer(model, P.K_shift).to(device) +criterion = nn.CrossEntropyLoss().to(device) + +if P.load_path is not None: + checkpoint = torch.load(P.load_path) + model.load_state_dict(checkpoint, strict=not P.no_strict) diff --git a/common/train.py b/common/train.py new file mode 100644 index 0000000..7d56f76 --- /dev/null +++ b/common/train.py @@ -0,0 +1,148 @@ +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.optim as optim +import torch.optim.lr_scheduler as lr_scheduler +from torch.utils.data import DataLoader + +from common.common import parse_args +import 
models.classifier as C +from datasets import get_dataset, get_superclass_list, get_subclass_dataset +from utils.utils import load_checkpoint + +P = parse_args() + +### Set torch device ### + +if torch.cuda.is_available(): + torch.cuda.set_device(P.local_rank) +device = torch.device(f"cuda" if torch.cuda.is_available() else "cpu") + +P.n_gpus = torch.cuda.device_count() + +if P.n_gpus > 1: + import apex + import torch.distributed as dist + from torch.utils.data.distributed import DistributedSampler + + P.multi_gpu = True + torch.distributed.init_process_group( + 'nccl', + init_method='env://', + world_size=P.n_gpus, + rank=P.local_rank, + ) +else: + P.multi_gpu = False + +### only use one ood_layer while training +P.ood_layer = P.ood_layer[0] + +### Initialize dataset ### +train_set, test_set, image_size, n_classes = get_dataset(P, dataset=P.dataset) +P.image_size = image_size +P.n_classes = n_classes + +if P.one_class_idx is not None: + cls_list = get_superclass_list(P.dataset) + P.n_superclasses = len(cls_list) + + full_test_set = deepcopy(test_set) # test set of full classes + train_set = get_subclass_dataset(train_set, classes=cls_list[P.one_class_idx]) + test_set = get_subclass_dataset(test_set, classes=cls_list[P.one_class_idx]) + +kwargs = {'pin_memory': False, 'num_workers': 2} + +if P.multi_gpu: + train_sampler = DistributedSampler(train_set, num_replicas=P.n_gpus, rank=P.local_rank) + test_sampler = DistributedSampler(test_set, num_replicas=P.n_gpus, rank=P.local_rank) + train_loader = DataLoader(train_set, sampler=train_sampler, batch_size=P.batch_size, **kwargs) + test_loader = DataLoader(test_set, sampler=test_sampler, batch_size=P.test_batch_size, **kwargs) +else: + train_loader = DataLoader(train_set, shuffle=True, batch_size=P.batch_size, **kwargs) + test_loader = DataLoader(test_set, shuffle=False, batch_size=P.test_batch_size, **kwargs) + +if P.ood_dataset is None: + if P.one_class_idx is not None: + P.ood_dataset = list(range(P.n_superclasses)) + 
P.ood_dataset.pop(P.one_class_idx) + elif P.dataset == 'cifar10': + P.ood_dataset = ['svhn', 'lsun_resize', 'imagenet_resize', 'lsun_fix', 'imagenet_fix', 'cifar100', 'interp'] + elif P.dataset == 'imagenet': + P.ood_dataset = ['cub', 'stanford_dogs', 'flowers102'] + +ood_test_loader = dict() +for ood in P.ood_dataset: + if ood == 'interp': + ood_test_loader[ood] = None # dummy loader + continue + + if P.one_class_idx is not None: + ood_test_set = get_subclass_dataset(full_test_set, classes=cls_list[ood]) + ood = f'one_class_{ood}' # change save name + else: + ood_test_set = get_dataset(P, dataset=ood, test_only=True, image_size=P.image_size) + + if P.multi_gpu: + ood_sampler = DistributedSampler(ood_test_set, num_replicas=P.n_gpus, rank=P.local_rank) + ood_test_loader[ood] = DataLoader(ood_test_set, sampler=ood_sampler, batch_size=P.test_batch_size, **kwargs) + else: + ood_test_loader[ood] = DataLoader(ood_test_set, shuffle=False, batch_size=P.test_batch_size, **kwargs) + +### Initialize model ### + +simclr_aug = C.get_simclr_augmentation(P, image_size=P.image_size).to(device) +P.shift_trans, P.K_shift = C.get_shift_module(P, eval=True) +P.shift_trans = P.shift_trans.to(device) + +model = C.get_classifier(P.model, n_classes=P.n_classes).to(device) +model = C.get_shift_classifer(model, P.K_shift).to(device) + +criterion = nn.CrossEntropyLoss().to(device) + +if P.optimizer == 'sgd': + optimizer = optim.SGD(model.parameters(), lr=P.lr_init, momentum=0.9, weight_decay=P.weight_decay) + lr_decay_gamma = 0.1 +elif P.optimizer == 'lars': + from torchlars import LARS + base_optimizer = optim.SGD(model.parameters(), lr=P.lr_init, momentum=0.9, weight_decay=P.weight_decay) + optimizer = LARS(base_optimizer, eps=1e-8, trust_coef=0.001) + lr_decay_gamma = 0.1 +else: + raise NotImplementedError() + +if P.lr_scheduler == 'cosine': + scheduler = lr_scheduler.CosineAnnealingLR(optimizer, P.epochs) +elif P.lr_scheduler == 'step_decay': + milestones = [int(0.5 * P.epochs), int(0.75 
* P.epochs)] + scheduler = lr_scheduler.MultiStepLR(optimizer, gamma=lr_decay_gamma, milestones=milestones) +else: + raise NotImplementedError() + +from training.scheduler import GradualWarmupScheduler +scheduler_warmup = GradualWarmupScheduler(optimizer, multiplier=10.0, total_epoch=P.warmup, after_scheduler=scheduler) + +if P.resume_path is not None: + resume = True + model_state, optim_state, config = load_checkpoint(P.resume_path, mode='last') + model.load_state_dict(model_state, strict=not P.no_strict) + optimizer.load_state_dict(optim_state) + start_epoch = config['epoch'] + best = config['best'] + error = 100.0 +else: + resume = False + start_epoch = 1 + best = 100.0 + error = 100.0 + +if P.mode == 'sup_linear' or P.mode == 'sup_CSI_linear': + assert P.load_path is not None + checkpoint = torch.load(P.load_path) + model.load_state_dict(checkpoint, strict=not P.no_strict) + +if P.multi_gpu: + simclr_aug = apex.parallel.DistributedDataParallel(simclr_aug, delay_allreduce=True) + model = apex.parallel.convert_syncbn_model(model) + model = apex.parallel.DistributedDataParallel(model, delay_allreduce=True) diff --git a/data/ImageNet_FIX.tar.gz b/data/ImageNet_FIX.tar.gz new file mode 100644 index 0000000..24d94b8 Binary files /dev/null and b/data/ImageNet_FIX.tar.gz differ diff --git a/data/Imagenet_resize.tar.gz b/data/Imagenet_resize.tar.gz new file mode 100644 index 0000000..989ab1b Binary files /dev/null and b/data/Imagenet_resize.tar.gz differ diff --git a/data/LSUN_FIX.tar.gz b/data/LSUN_FIX.tar.gz new file mode 100644 index 0000000..037f846 Binary files /dev/null and b/data/LSUN_FIX.tar.gz differ diff --git a/data/LSUN_resize.tar.gz b/data/LSUN_resize.tar.gz new file mode 100644 index 0000000..a727a35 Binary files /dev/null and b/data/LSUN_resize.tar.gz differ diff --git a/datasets/__init__.py b/datasets/__init__.py new file mode 100644 index 0000000..aad5ccb --- /dev/null +++ b/datasets/__init__.py @@ -0,0 +1,2 @@ +from datasets.datasets import 
get_dataset, get_superclass_list, get_subclass_dataset + diff --git a/datasets/__pycache__/__init__.cpython-36.pyc b/datasets/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..4d188db Binary files /dev/null and b/datasets/__pycache__/__init__.cpython-36.pyc differ diff --git a/datasets/__pycache__/__init__.cpython-37.pyc b/datasets/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..b3d35a0 Binary files /dev/null and b/datasets/__pycache__/__init__.cpython-37.pyc differ diff --git a/datasets/__pycache__/datasets.cpython-36.pyc b/datasets/__pycache__/datasets.cpython-36.pyc new file mode 100644 index 0000000..90ee4ba Binary files /dev/null and b/datasets/__pycache__/datasets.cpython-36.pyc differ diff --git a/datasets/__pycache__/datasets.cpython-37.pyc b/datasets/__pycache__/datasets.cpython-37.pyc new file mode 100644 index 0000000..ae14429 Binary files /dev/null and b/datasets/__pycache__/datasets.cpython-37.pyc differ diff --git a/datasets/__pycache__/datasets.cpython-37.pyc.2427217203392 b/datasets/__pycache__/datasets.cpython-37.pyc.2427217203392 new file mode 100644 index 0000000..e69de29 diff --git a/datasets/__pycache__/postprocess_data.cpython-36.pyc b/datasets/__pycache__/postprocess_data.cpython-36.pyc new file mode 100644 index 0000000..8ec31c8 Binary files /dev/null and b/datasets/__pycache__/postprocess_data.cpython-36.pyc differ diff --git a/datasets/__pycache__/postprocess_data.cpython-37.pyc b/datasets/__pycache__/postprocess_data.cpython-37.pyc new file mode 100644 index 0000000..ee25cf0 Binary files /dev/null and b/datasets/__pycache__/postprocess_data.cpython-37.pyc differ diff --git a/datasets/__pycache__/prepare_data.cpython-36.pyc b/datasets/__pycache__/prepare_data.cpython-36.pyc new file mode 100644 index 0000000..d93a0c1 Binary files /dev/null and b/datasets/__pycache__/prepare_data.cpython-36.pyc differ diff --git a/datasets/__pycache__/prepare_data.cpython-37.pyc 
b/datasets/__pycache__/prepare_data.cpython-37.pyc new file mode 100644 index 0000000..8e6541c Binary files /dev/null and b/datasets/__pycache__/prepare_data.cpython-37.pyc differ diff --git a/datasets/datasets.py b/datasets/datasets.py new file mode 100644 index 0000000..3a17685 --- /dev/null +++ b/datasets/datasets.py @@ -0,0 +1,361 @@ +import os + +import numpy as np +import torch +from torch.utils.data.dataset import Subset +from torchvision import datasets, transforms + +from utils.utils import set_random_seed + +DATA_PATH = '~/data/' +IMAGENET_PATH = '~/data/ImageNet' +CNMC_PATH = r'~/data/CSI/CNMC_orig' +CNMC_GRAY_PATH = r'~/data/CSI/CNMC_orig_gray' +CNMC_ROT4_PATH = r'~/data/CSI/CNMC_rotated_4' + +CIFAR10_SUPERCLASS = list(range(10)) # one class +IMAGENET_SUPERCLASS = list(range(30)) # one class +CNMC_SUPERCLASS = list(range(2)) # one class + +STD_RES = 450 +STD_CENTER_CROP = 300 + +CIFAR100_SUPERCLASS = [ + [4, 31, 55, 72, 95], + [1, 33, 67, 73, 91], + [54, 62, 70, 82, 92], + [9, 10, 16, 29, 61], + [0, 51, 53, 57, 83], + [22, 25, 40, 86, 87], + [5, 20, 26, 84, 94], + [6, 7, 14, 18, 24], + [3, 42, 43, 88, 97], + [12, 17, 38, 68, 76], + [23, 34, 49, 60, 71], + [15, 19, 21, 32, 39], + [35, 63, 64, 66, 75], + [27, 45, 77, 79, 99], + [2, 11, 36, 46, 98], + [28, 30, 44, 78, 93], + [37, 50, 65, 74, 80], + [47, 52, 56, 59, 96], + [8, 13, 48, 58, 90], + [41, 69, 81, 85, 89], +] + + +class MultiDataTransform(object): + def __init__(self, transform): + self.transform1 = transform + self.transform2 = transform + + def __call__(self, sample): + x1 = self.transform1(sample) + x2 = self.transform2(sample) + return x1, x2 + + +class MultiDataTransformList(object): + def __init__(self, transform, clean_trasform, sample_num): + self.transform = transform + self.clean_transform = clean_trasform + self.sample_num = sample_num + + def __call__(self, sample): + set_random_seed(0) + + sample_list = [] + for i in range(self.sample_num): + 
sample_list.append(self.transform(sample)) + + return sample_list, self.clean_transform(sample) + + +def get_transform(image_size=None): + # Note: data augmentation is implemented in the layers + # Hence, we only define the identity transformation here + if image_size: # use pre-specified image size + train_transform = transforms.Compose([ + transforms.Resize((image_size[0], image_size[1])), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + ]) + test_transform = transforms.Compose([ + transforms.Resize((image_size[0], image_size[1])), + transforms.ToTensor(), + ]) + else: # use default image size + train_transform = transforms.Compose([ + transforms.ToTensor(), + ]) + test_transform = transforms.ToTensor() + + return train_transform, test_transform + + +def get_subset_with_len(dataset, length, shuffle=False): + set_random_seed(0) + dataset_size = len(dataset) + + index = np.arange(dataset_size) + if shuffle: + np.random.shuffle(index) + + index = torch.from_numpy(index[0:length]) + subset = Subset(dataset, index) + + assert len(subset) == length + + return subset + + +def get_transform_imagenet(): + + train_transform = transforms.Compose([ + transforms.Resize(256), + transforms.RandomResizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + ]) + test_transform = transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + ]) + + train_transform = MultiDataTransform(train_transform) + + return train_transform, test_transform + +def get_transform_cnmc(res, center_crop_size): + train_transform = transforms.Compose([ + transforms.Resize(res), + transforms.CenterCrop(center_crop_size), + transforms.RandomVerticalFlip(), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + ]) + test_transform = transforms.Compose([ + transforms.Resize(res), + transforms.CenterCrop(center_crop_size), + transforms.ToTensor(), + ]) + train_transform = MultiDataTransform(train_transform) + + return 
train_transform, test_transform + + +def get_dataset(P, dataset, test_only=False, image_size=None, download=False, eval=False): + if P.res != '': + res = int(P.res.replace('px', '')) + size_factor = int(STD_RES/res) # always remove same portion + center_crop_size = int(STD_CENTER_CROP/size_factor) # remove black border + if dataset in ['CNMC', 'CNMC_grayscale', 'CNMC_ROT4_PATH']: + if eval: + train_transform, test_transform = get_simclr_eval_transform_cnmc(P.ood_samples, + P.resize_factor, P.resize_fix, res, center_crop_size) + else: + train_transform, test_transform = get_transform_cnmc(res, center_crop_size) + elif dataset in ['imagenet', 'cub', 'stanford_dogs', 'flowers102', + 'places365', 'food_101', 'caltech_256', 'dtd', 'pets']: + if eval: + train_transform, test_transform = get_simclr_eval_transform_imagenet(P.ood_samples, + P.resize_factor, P.resize_fix) + else: + train_transform, test_transform = get_transform_imagenet() + else: + train_transform, test_transform = get_transform(image_size=image_size) + + if dataset == 'CNMC': + image_size = (center_crop_size, center_crop_size, 3) #original 450,450,3 + n_classes = 2 + train_dir = os.path.join(CNMC_PATH, '0_training') + test_dir = os.path.join(CNMC_PATH, '1_validation') + train_set = datasets.ImageFolder(train_dir, transform=train_transform) + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + + elif dataset == 'CNMC_grayscale': + image_size = (center_crop_size, center_crop_size, 3) #original 450,450,3 + n_classes = 2 + train_dir = os.path.join(CNMC_GRAY_PATH, '0_training') + test_dir = os.path.join(CNMC_GRAY_PATH, '1_validation') + train_set = datasets.ImageFolder(train_dir, transform=train_transform) + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + + elif dataset == 'cifar10': + image_size = (32, 32, 3) + n_classes = 10 + train_set = datasets.CIFAR10(DATA_PATH, train=True, download=download, transform=train_transform) + test_set = datasets.CIFAR10(DATA_PATH, 
train=False, download=download, transform=test_transform) + + elif dataset == 'cifar100': + image_size = (32, 32, 3) + n_classes = 100 + train_set = datasets.CIFAR100(DATA_PATH, train=True, download=download, transform=train_transform) + test_set = datasets.CIFAR100(DATA_PATH, train=False, download=download, transform=test_transform) + + elif dataset == 'svhn': + assert test_only and image_size is not None + test_set = datasets.SVHN(DATA_PATH, split='test', download=download, transform=test_transform) + + elif dataset == 'lsun_resize': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'LSUN_resize') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + + elif dataset == 'lsun_fix': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'LSUN_fix') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + + elif dataset == 'imagenet_resize': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'Imagenet_resize') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + + elif dataset == 'imagenet_fix': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'Imagenet_fix') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + + elif dataset == 'imagenet': + image_size = (224, 224, 3) + n_classes = 30 + train_dir = os.path.join(IMAGENET_PATH, 'one_class_train') + test_dir = os.path.join(IMAGENET_PATH, 'one_class_test') + train_set = datasets.ImageFolder(train_dir, transform=train_transform) + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + + elif dataset == 'stanford_dogs': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'stanford_dogs') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + test_set = get_subset_with_len(test_set, length=3000, shuffle=True) + + elif dataset == 'cub': + assert test_only and 
image_size is not None + test_dir = os.path.join(DATA_PATH, 'cub200') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + test_set = get_subset_with_len(test_set, length=3000, shuffle=True) + + elif dataset == 'flowers102': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'flowers102') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + test_set = get_subset_with_len(test_set, length=3000, shuffle=True) + + elif dataset == 'places365': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'places365') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + test_set = get_subset_with_len(test_set, length=3000, shuffle=True) + + elif dataset == 'food_101': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'food-101', 'images') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + test_set = get_subset_with_len(test_set, length=3000, shuffle=True) + + elif dataset == 'caltech_256': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'caltech-256') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + test_set = get_subset_with_len(test_set, length=3000, shuffle=True) + + elif dataset == 'dtd': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'dtd', 'images') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + test_set = get_subset_with_len(test_set, length=3000, shuffle=True) + + elif dataset == 'pets': + assert test_only and image_size is not None + test_dir = os.path.join(DATA_PATH, 'pets') + test_set = datasets.ImageFolder(test_dir, transform=test_transform) + test_set = get_subset_with_len(test_set, length=3000, shuffle=True) + + else: + raise NotImplementedError() + + if test_only: + return test_set + else: + return train_set, test_set, image_size, n_classes + + +def 
get_superclass_list(dataset): + if dataset == 'CNMC': + return CNMC_SUPERCLASS + if dataset == 'CNMC_grayscale': + return CNMC_SUPERCLASS + elif dataset == 'cifar10': + return CIFAR10_SUPERCLASS + elif dataset == 'cifar100': + return CIFAR100_SUPERCLASS + elif dataset == 'imagenet': + return IMAGENET_SUPERCLASS + else: + raise NotImplementedError() + + +def get_subclass_dataset(dataset, classes): + if not isinstance(classes, list): + classes = [classes] + + indices = [] + for idx, tgt in enumerate(dataset.targets): + if tgt in classes: + indices.append(idx) + + dataset = Subset(dataset, indices) + return dataset + + +def get_simclr_eval_transform_imagenet(sample_num, resize_factor, resize_fix): + + resize_scale = (resize_factor, 1.0) # resize scaling factor + if resize_fix: # if resize_fix is True, use same scale + resize_scale = (resize_factor, resize_factor) + + transform = transforms.Compose([ + transforms.Resize(256), + transforms.RandomResizedCrop(224, scale=resize_scale), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + ]) + + clean_trasform = transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(224), + transforms.ToTensor(), + ]) + + transform = MultiDataTransformList(transform, clean_trasform, sample_num) + + return transform, transform + +def get_simclr_eval_transform_cnmc(sample_num, resize_factor, resize_fix, res, center_crop_size): + + resize_scale = (resize_factor, 1.0) # resize scaling factor + if resize_fix: # if resize_fix is True, use same scale + resize_scale = (resize_factor, resize_factor) + + transform = transforms.Compose([ + transforms.Resize(res), + transforms.CenterCrop(center_crop_size), + transforms.RandomVerticalFlip(), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + ]) + + clean_trasform = transforms.Compose([ + transforms.Resize(res), + transforms.CenterCrop(center_crop_size), + transforms.ToTensor(), + ]) + + transform = MultiDataTransformList(transform, clean_trasform, sample_num) + + 
return transform, transform + + diff --git a/datasets/imagenet_fix_preprocess.py b/datasets/imagenet_fix_preprocess.py new file mode 100644 index 0000000..dcabbd9 --- /dev/null +++ b/datasets/imagenet_fix_preprocess.py @@ -0,0 +1,66 @@ +import os +import time +import random + +import cv2 +import numpy as np +import torch + +import torch.nn.functional as F +from torchvision import datasets, transforms +from torch.utils.data import DataLoader +from torchvision.utils import save_image + +from datasets import get_subclass_dataset + +def set_random_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + +IMAGENET_PATH = '~/data/ImageNet' + + +check = time.time() + +transform = transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(256), + transforms.Resize(32), + transforms.ToTensor(), + ]) + +# remove airliner(1), ambulance(2), parking_meter(18), schooner(22) since similar class exist in CIFAR-10 +class_idx_list = list(range(30)) +remove_idx_list = [1, 2, 18, 22] +for remove_idx in remove_idx_list: + class_idx_list.remove(remove_idx) + +set_random_seed(0) +train_dir = os.path.join(IMAGENET_PATH, 'one_class_train') +Imagenet_set = datasets.ImageFolder(train_dir, transform=transform) +Imagenet_set = get_subclass_dataset(Imagenet_set, class_idx_list) +Imagenet_dataloader = DataLoader(Imagenet_set, batch_size=100, shuffle=True, pin_memory=False) + +total_test_image = None +for n, (test_image, target) in enumerate(Imagenet_dataloader): + + if n == 0: + total_test_image = test_image + else: + total_test_image = torch.cat((total_test_image, test_image), dim=0).cpu() + + if total_test_image.size(0) >= 10000: + break + +print (f'Preprocessing time {time.time()-check}') + +if not os.path.exists('./Imagenet_fix'): + os.mkdir('./Imagenet_fix') + +check = time.time() +for i in range(10000): + save_image(total_test_image[i], f'Imagenet_fix/correct_resize_{i}.png') +print (f'Saving time {time.time()-check}') + 
diff --git a/datasets/lsun_fix_preprocess.py b/datasets/lsun_fix_preprocess.py new file mode 100644 index 0000000..0058512 --- /dev/null +++ b/datasets/lsun_fix_preprocess.py @@ -0,0 +1,61 @@ +import os +import time +import random + +import numpy as np +import torch + +from torchvision import datasets, transforms +from torch.utils.data import DataLoader +from torchvision.utils import save_image + +def set_random_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + +check = time.time() + +transform = transforms.Compose([ + transforms.Resize(256), + transforms.CenterCrop(256), + transforms.Resize(32), + transforms.ToTensor(), + ]) + +set_random_seed(0) + +LSUN_class_list = ['bedroom', 'bridge', 'church_outdoor', 'classroom', + 'conference_room', 'dining_room', 'kitchen', 'living_room', 'restaurant', 'tower'] + +total_test_image_all_class = [] +for LSUN_class in LSUN_class_list: + LSUN_set = datasets.LSUN('~/data/lsun/', classes=LSUN_class + '_train', transform=transform) + LSUN_loader = DataLoader(LSUN_set, batch_size=100, shuffle=True, pin_memory=False) + + total_test_image = None + for n, (test_image, _) in enumerate(LSUN_loader): + + if n == 0: + total_test_image = test_image + else: + total_test_image = torch.cat((total_test_image, test_image), dim=0).cpu() + + if total_test_image.size(0) >= 1000: + break + + total_test_image_all_class.append(total_test_image) + +total_test_image_all_class = torch.cat(total_test_image_all_class, dim=0) + +print (f'Preprocessing time {time.time()-check}') + +if not os.path.exists('./LSUN_fix'): + os.mkdir('./LSUN_fix') + +check = time.time() +for i in range(10000): + save_image(total_test_image_all_class[i], f'LSUN_fix/correct_resize_{i}.png') +print (f'Saving time {time.time()-check}') + diff --git a/datasets/postprocess_data.py b/datasets/postprocess_data.py new file mode 100644 index 0000000..ba7720f --- /dev/null +++ b/datasets/postprocess_data.py @@ -0,0 +1,37 @@ 
+import re +import matplotlib.pyplot as plt + +PATH = r'C:\Users\feokt\PycharmProjects\CSI\CSI\logs' + + +def postprocess_data(log: list): + for pth in log: + loss_sim = [] + loss_shift = [] + with open(PATH + pth) as f: + lines = f.readlines() + for line in lines: + # line = '[2022-01-31 20:40:23.947855] [DONE] [Time 0.179] [Data 0.583] [LossC 0.000000] [LossSim 4.024234] [LossShift 0.065126]' + part = re.search('\[DONE\]', line) + if part is not None: + l_sim = re.search('(\[LossSim.[0-9]*.[0-9]*\])', line).group() + if l_sim is not None: + loss_sim.append(float(re.search('(\s[0-9].*[0-9])', l_sim).group())) + l_shift = re.search('(\[LossShift.[0-9]*.[0-9]*\])', line).group() + if l_shift is not None: + loss_shift.append(float(re.search('(\s[0-9].*[0-9])', l_shift).group())) + loss = [loss_sim[i] + loss_shift[i] for i in range(len(loss_sim))] + + plt.ylabel("loss") + plt.xlabel("epoch") + plt.title("Loss over epochs") + plt.plot(list(range(1, 101)), loss) + for idx in range(len(log)): + log[idx] = log[idx][38:] + plt.legend(log) + plt.grid() + #plt.plot(list(range(1, 101)), loss_sim) + #plt.plot(list(range(1, 101)), loss_shift) + plt.show() + + diff --git a/datasets/prepare_data.py b/datasets/prepare_data.py new file mode 100644 index 0000000..fca651f --- /dev/null +++ b/datasets/prepare_data.py @@ -0,0 +1,196 @@ +import csv +import os +from PIL import Image +from torchvision import transforms +from torchvision.utils import save_image +import torch + + +def transform_image(img_in, target_dir, transformation, suffix): + """ + Transforms an image according to provided transformation. + + Parameters: + img_in (path): Image to transform + target_dir (path): Destination path + transformation (callable): Transformation to be applied + suffix (str): Suffix of resulting image. 
def sort_and_rename_images(excel_path: str):
    """Rename images and sort them into class folders according to a CSV file.

    Each CSV row is expected as (new_name, current_name, label); label '1'
    moves the image into an 'all' subfolder, label '0' into 'hem'.

    Args:
        excel_path (str): Path to the CSV file; images live in its directory.
    """
    base_dir = excel_path.rsplit(os.sep, 1)[0]
    dir_all = os.path.join(base_dir, 'all')
    dir_hem = os.path.join(base_dir, 'hem')
    # makedirs(exist_ok=True) avoids the isdir/mkdir TOCTOU race of the original.
    os.makedirs(dir_all, exist_ok=True)
    os.makedirs(dir_hem, exist_ok=True)

    with open(excel_path, mode='r') as file:
        for lines in csv.reader(file):
            print(lines)
            if lines[2] == '1':
                os.rename(os.path.join(base_dir, lines[1]),
                          os.path.join(dir_all, lines[0]))
            elif lines[2] == '0':
                os.rename(os.path.join(base_dir, lines[1]),
                          os.path.join(dir_hem, lines[0]))


def drop_color_channels(source_dir, target_dir, rgb):
    """Zero out colour channels of every image in source_dir and save to target_dir.

    Args:
        source_dir: Directory with input images.
        target_dir: Directory the modified images are written to (bmp).
        rgb (int): 0=red only, 1=green only, 2=blue only,
                   3=drop red, 4=drop green, 5=drop blue.
    """
    # Map mode -> channel indices to zero; replaces the original if/elif ladder.
    drop_table = {
        0: (1, 2),   # red_only
        1: (0, 2),   # green_only
        2: (0, 1),   # blue_only
        3: (0,),     # no_red
        4: (1,),     # no_green
        5: (2,),     # no_blue
    }
    if rgb not in drop_table:
        print("Invalid RGB-channel")
        return
    for item in os.listdir(source_dir):
        src = os.path.join(source_dir, item)
        if os.path.isfile(src):
            tensor = transforms.ToTensor()(Image.open(src))
            for channel in drop_table[rgb]:
                tensor[channel, :, :] = 0
            save_image(tensor, os.path.join(target_dir, item), 'bmp')


def rotate_images(target_dir, source_dir, rotate, theta):
    """Save `rotate` rotated copies (by i*theta degrees) of each image in source_dir."""
    for item in os.listdir(source_dir):
        src = os.path.join(source_dir, item)
        if os.path.isfile(src):
            for i in range(rotate):
                im = Image.open(src).rotate(i * theta)
                tensor = transforms.ToTensor()(im)
                save_image(tensor, os.path.join(target_dir, str(i) + '_' + item), 'bmp')


def grayscale_image(source_dir, target_dir):
    """Grayscale-convert all images in source_dir and save them as bmp in target_dir.

    The original also built a zero-padded tensor from the grayscale image but
    never used it (dead code) — only the PIL image was saved; that computation
    is removed here.
    """
    to_gray = transforms.Grayscale()
    os.makedirs(target_dir, exist_ok=True)
    for item in os.listdir(source_dir):
        src = os.path.join(source_dir, item)
        if os.path.isfile(src):
            im_gray = to_gray(Image.open(src).convert('RGB'))
            im_gray.save(os.path.join(target_dir, item), 'bmp')


def resize(source_dir):
    """Resize all images in source_dir to 128x128 and save them in a 'resized' subdir."""
    t = transforms.Compose([transforms.Resize((128, 128))])
    target_dir = os.path.join(source_dir, 'resized')
    os.makedirs(target_dir, exist_ok=True)
    for item in os.listdir(source_dir):
        src = os.path.join(source_dir, item)
        if os.path.isfile(src):
            t(Image.open(src)).save(os.path.join(target_dir, item), 'bmp')


def crop_image(source_dir):
    """Center-crop all images in source_dir to 224x224 and save them in a 'cropped' subdir."""
    t = transforms.CenterCrop((224, 224))
    target_dir = os.path.join(source_dir, 'cropped')
    os.makedirs(target_dir, exist_ok=True)
    for item in os.listdir(source_dir):
        src = os.path.join(source_dir, item)
        if os.path.isfile(src):
            t(Image.open(src)).save(os.path.join(target_dir, item), 'bmp')


def mk_dirs(target_dir):
    """Create the fold_0..fold_2 (each with all/hem), phase2 and phase3 dirs.

    Uses os.path.join instead of the original hard-coded Windows '\\' raw
    strings, making the helper portable and consistent with the rest of the
    file (which builds paths with os.sep).

    Returns:
        Tuple of (fold0_all, fold0_hem, fold1_all, fold1_hem,
                  fold2_all, fold2_hem, phase2_dir, phase3_dir).
    """
    class_dirs = []
    for i in range(3):
        fold = os.path.join(target_dir, 'fold_' + str(i))
        os.makedirs(fold, exist_ok=True)
        for cls in ('all', 'hem'):
            d = os.path.join(fold, cls)
            os.makedirs(d, exist_ok=True)
            class_dirs.append(d)
    phase2 = os.path.join(target_dir, 'phase2')
    phase3 = os.path.join(target_dir, 'phase3')
    os.makedirs(phase2, exist_ok=True)
    os.makedirs(phase3, exist_ok=True)
    return (class_dirs[0], class_dirs[1], class_dirs[2],
            class_dirs[3], class_dirs[4], class_dirs[5], phase2, phase3)
"Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0152\t0.0130\t0.0105\t0.0135\n", + "weight_shi:\t-0.0578\t0.1014\t0.1178\t0.1034\n", + "Pre-compute features...\n", + "Compute OOD scores... (score: CSI)\n", + "One_class_real_mean: 0.5853569764733287\n", + "CNMC 1.9901 +- 0.1085 q0: 1.7212 q10: 1.8609 q20: 1.9042 q30: 1.9291 q40: 1.9549 q50: 1.9800 q60: 2.0013 q70: 2.0351 q80: 2.0786 q90: 2.1427 q100: 2.3180\n", + "one_class_0 1.9561 +- 0.0829 q0: 1.6679 q10: 1.8555 q20: 1.8869 q30: 1.9128 q40: 1.9357 q50: 1.9564 q60: 1.9747 q70: 1.9978 q80: 2.0224 q90: 2.0600 q100: 2.2041\n", + "[one_class_0 CSI 0.5854] [one_class_0 best 0.5854] \n", + "[one_class_mean CSI 0.5854] [one_class_mean best 0.5854] \n", + "0.5854\t0.5854\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --distortion_scale 0.8 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 130, + "id": "846efb49", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0109\t0.0072\t0.0133\t0.0129\n", + "weight_shi:\t0.4840\t0.0844\t0.4048\t0.2004\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4842849583244716\n", + "CNMC 1.9963 +- 0.4334 q0: 0.3449 q10: 1.4686 q20: 1.6647 q30: 1.7749 q40: 1.8802 q50: 1.9851 q60: 2.0904 q70: 2.2032 q80: 2.3314 q90: 2.5160 q100: 3.5596\n", + "one_class_0 2.0168 +- 0.3659 q0: 0.5032 q10: 1.5638 q20: 1.7269 q30: 1.8222 q40: 1.9245 q50: 2.0083 q60: 2.0883 q70: 2.1776 q80: 2.3057 q90: 2.4967 q100: 3.3674\n", + "[one_class_0 CSI 0.4843] [one_class_0 best 0.4843] \n", + "[one_class_mean CSI 0.4843] [one_class_mean best 0.4843] \n", + "0.4843\t0.4843\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --sharpness_factor 128 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 131, + "id": "ebf2e296", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0019\t0.0039\t0.0042\t0.0047\n", + "weight_shi:\t0.0159\t0.3020\t1.0707\t0.5438\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.43598274238142987\n", + "CNMC 1.9968 +- 0.4243 q0: 1.0210 q10: 1.5387 q20: 1.6392 q30: 1.7221 q40: 1.7914 q50: 1.8964 q60: 2.0368 q70: 2.1923 q80: 2.3638 q90: 2.6239 q100: 3.6290\n", + "one_class_0 2.0836 +- 0.4325 q0: 1.0885 q10: 1.6040 q20: 1.7218 q30: 1.8127 q40: 1.9018 q50: 1.9885 q60: 2.1022 q70: 2.2500 q80: 2.4798 q90: 2.6977 q100: 3.7788\n", + "[one_class_0 CSI 0.4360] [one_class_0 best 0.4360] \n", + "[one_class_mean CSI 0.4360] [one_class_mean best 0.4360] \n", + "0.4360\t0.4360\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --distortion_scale 0.8 --resize_factor 0.08 --sharpness_factor 128 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_sharp_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 132, + "id": "a7b553d3", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0011\t0.0008\t0.0009\t0.0009\n", + "weight_shi:\t-0.0836\t0.1015\t0.0813\t0.0787\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5724992151024417\n", + "CNMC 2.0160 +- 0.0836 q0: 1.8554 q10: 1.9224 q20: 1.9466 q30: 1.9663 q40: 1.9854 q50: 2.0042 q60: 2.0232 q70: 2.0465 q80: 2.0759 q90: 2.1259 q100: 2.3440\n", + "one_class_0 1.9930 +- 0.0670 q0: 1.8047 q10: 1.9141 q20: 1.9399 q30: 1.9557 q40: 1.9704 q50: 1.9843 q60: 2.0012 q70: 2.0244 q80: 2.0475 q90: 2.0793 q100: 2.2942\n", + "[one_class_0 CSI 0.5725] [one_class_0 best 0.5725] \n", + "[one_class_mean CSI 0.5725] [one_class_mean best 0.5725] \n", + "0.5725\t0.5725\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --sharpness_factor 128 --distortion_scale 0.8 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_randpers_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_sharp_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "b5d5f05f", + "metadata": {}, + "source": [ + "## sharp" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "13c15d92", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0082\t0.0048\t0.0035\t0.0035\n", + "weight_shi:\t-0.0162\t0.0291\t0.0264\t0.0261\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.46516067612594825\n", + "CNMC 2.0611 +- 0.2843 q0: 1.4233 q10: 1.7048 q20: 1.8158 q30: 1.8910 q40: 1.9831 q50: 2.0498 q60: 2.1209 q70: 2.1990 q80: 2.3022 q90: 2.4508 q100: 3.0255\n", + "one_class_0 2.0896 +- 0.2109 q0: 1.5218 q10: 1.8407 q20: 1.9143 q30: 1.9720 q40: 2.0252 q50: 2.0691 q60: 2.1174 q70: 2.1761 q80: 2.2568 q90: 2.3717 q100: 2.9418\n", + "[one_class_0 CSI 0.4652] [one_class_0 best 0.4652] \n", + "[one_class_mean CSI 0.4652] [one_class_mean best 0.4652] \n", + "0.4652\t0.4652\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 4096\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 4096 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor4096.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "25951e79", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0095\t0.0075\t0.0068\t0.0072\n", + "weight_shi:\t-0.0480\t0.0769\t0.0704\t0.0693\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4025752235692077\n", + "CNMC 1.9780 +- 0.1552 q0: 1.6133 q10: 1.7905 q20: 1.8304 q30: 1.8774 q40: 1.9265 q50: 1.9698 q60: 2.0166 q70: 2.0610 q80: 2.1123 q90: 2.1776 q100: 2.5595\n", + "one_class_0 2.0255 +- 0.1272 q0: 1.6800 q10: 1.8659 q20: 1.9179 q30: 1.9585 q40: 1.9884 q50: 2.0210 q60: 2.0530 q70: 2.0845 q80: 2.1202 q90: 2.1844 q100: 2.7354\n", + "[one_class_0 CSI 0.4026] [one_class_0 best 0.4026] \n", + "[one_class_mean CSI 0.4026] [one_class_mean best 0.4026] \n", + "0.4026\t0.4026\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 2048\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 2048 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor2048.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 133, + "id": "4fc12b02", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0089\t0.0059\t0.0064\t0.0063\n", + "weight_shi:\t-0.0361\t0.0765\t0.0742\t0.0727\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4227749420188578\n", + "CNMC 2.0207 +- 0.1832 q0: 1.5627 q10: 1.7726 q20: 1.8618 q30: 1.9226 q40: 1.9811 q50: 2.0217 q60: 2.0627 q70: 2.1123 q80: 2.1832 q90: 2.2560 q100: 2.6861\n", + "one_class_0 2.0632 +- 0.1200 q0: 1.6644 q10: 1.9104 q20: 1.9645 q30: 2.0071 q40: 2.0386 q50: 2.0633 q60: 2.0887 q70: 2.1181 q80: 2.1534 q90: 2.2160 q100: 2.5742\n", + "[one_class_0 CSI 0.4228] [one_class_0 best 0.4228] \n", + "[one_class_mean CSI 0.4228] [one_class_mean best 0.4228] \n", + "0.4228\t0.4228\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 1024\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 1024 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor1024.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "99698eb6", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0067\t0.0032\t0.0035\t0.0038\n", + "weight_shi:\t-0.0499\t0.0682\t0.0675\t0.0722\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3898554522529092\n", + "CNMC 1.9663 +- 0.1069 q0: 1.6970 q10: 1.8437 q20: 1.8799 q30: 1.9043 q40: 1.9235 q50: 1.9537 q60: 1.9831 q70: 2.0145 q80: 2.0505 q90: 2.1087 q100: 2.5004\n", + "one_class_0 2.0038 +- 0.0970 q0: 1.7568 q10: 1.8902 q20: 1.9239 q30: 1.9445 q40: 1.9657 q50: 1.9897 q60: 2.0196 q70: 2.0483 q80: 2.0868 q90: 2.1398 q100: 2.3773\n", + "[one_class_0 CSI 0.3899] [one_class_0 best 0.3899] \n", + "[one_class_mean CSI 0.3899] [one_class_mean best 0.3899] \n", + "0.3899\t0.3899\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 512\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 512 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor512.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "01e6d61a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0053\t0.0084\t0.0092\t0.0087\n", + "weight_shi:\t0.4300\t0.0647\t0.0695\t0.0685\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5784846919656873\n", + "CNMC 2.1270 +- 0.9610 q0: -0.9326 q10: 0.9101 q20: 1.3880 q30: 1.6010 q40: 1.8639 q50: 2.1067 q60: 2.3374 q70: 2.5413 q80: 2.8893 q90: 3.3775 q100: 5.1585\n", + "one_class_0 1.8950 +- 0.7309 q0: -0.2104 q10: 1.0100 q20: 1.3020 q30: 1.4936 q40: 1.6684 q50: 1.8373 q60: 2.0139 q70: 2.2017 q80: 2.4870 q90: 2.8570 q100: 4.5441\n", + "[one_class_0 CSI 0.5785] [one_class_0 best 0.5785] \n", + "[one_class_mean CSI 0.5785] [one_class_mean best 0.5785] \n", + "0.5785\t0.5785\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 256\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 256 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor256.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "65cc4fcd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0089\t0.0063\t0.0075\t0.0065\n", + "weight_shi:\t-0.0184\t0.0363\t0.0371\t0.0371\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3679688370350115\n", + "CNMC 1.9800 +- 0.0919 q0: 1.7207 q10: 1.8629 q20: 1.8911 q30: 1.9241 q40: 1.9548 q50: 1.9755 q60: 2.0034 q70: 2.0354 q80: 2.0631 q90: 2.1071 q100: 2.2242\n", + "one_class_0 2.0217 +- 0.0794 q0: 1.7727 q10: 1.9194 q20: 1.9543 q30: 1.9779 q40: 1.9999 q50: 2.0212 q60: 2.0423 q70: 2.0650 q80: 2.0906 q90: 2.1259 q100: 2.2548\n", + "[one_class_0 CSI 0.3680] [one_class_0 best 0.3680] \n", + "[one_class_mean CSI 0.3680] [one_class_mean best 0.3680] \n", + "0.3680\t0.3680\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 150\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 150 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor150.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "e13b48db", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0083\t0.0053\t0.0056\t0.0053\n", + "weight_shi:\t-0.1256\t0.0869\t0.0823\t0.0921\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4935733347512128\n", + "CNMC 2.0389 +- 0.1184 q0: 1.7300 q10: 1.8883 q20: 1.9358 q30: 1.9771 q40: 2.0079 q50: 2.0342 q60: 2.0657 q70: 2.0974 q80: 2.1469 q90: 2.1945 q100: 2.5086\n", + "one_class_0 2.0418 +- 0.0930 q0: 1.7624 q10: 1.9334 q20: 1.9610 q30: 1.9867 q40: 2.0125 q50: 2.0354 q60: 2.0608 q70: 2.0915 q80: 2.1163 q90: 2.1599 q100: 2.3964\n", + "[one_class_0 CSI 0.4936] [one_class_0 best 0.4936] \n", + "[one_class_mean CSI 0.4936] [one_class_mean best 0.4936] \n", + "0.4936\t0.4936\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 140\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 140 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor140.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "id": "29cf690f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0045\t0.0043\t0.0070\t0.0053\n", + "weight_shi:\t-0.0813\t0.0676\t0.0710\t0.0626\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4419042880725954\n", + "CNMC 2.0129 +- 0.1195 q0: 1.5835 q10: 1.8643 q20: 1.9252 q30: 1.9581 q40: 1.9900 q50: 2.0157 q60: 2.0378 q70: 2.0653 q80: 2.1055 q90: 2.1542 q100: 2.4828\n", + "one_class_0 2.0362 +- 0.1010 q0: 1.6891 q10: 1.9098 q20: 1.9486 q30: 1.9817 q40: 2.0087 q50: 2.0351 q60: 2.0576 q70: 2.0853 q80: 2.1206 q90: 2.1651 q100: 2.3927\n", + "[one_class_0 CSI 0.4419] [one_class_0 best 0.4419] \n", + "[one_class_mean CSI 0.4419] [one_class_mean best 0.4419] \n", + "0.4419\t0.4419\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 130\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 130 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor130.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "id": "dfaa2119", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0130\t0.0032\t0.0032\t0.0028\n", + "weight_shi:\t-0.0796\t0.1288\t0.1192\t0.1286\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.47003337080586194\n", + "CNMC 1.9969 +- 0.1287 q0: 1.5279 q10: 1.8373 q20: 1.9073 q30: 1.9387 q40: 1.9663 q50: 1.9928 q60: 2.0270 q70: 2.0640 q80: 2.0993 q90: 2.1517 q100: 2.4161\n", + "one_class_0 2.0110 +- 0.1133 q0: 1.6407 q10: 1.8709 q20: 1.9143 q30: 1.9536 q40: 1.9857 q50: 2.0121 q60: 2.0374 q70: 2.0675 q80: 2.1043 q90: 2.1554 q100: 2.3778\n", + "[one_class_0 CSI 0.4700] [one_class_0 best 0.4700] \n", + "[one_class_mean CSI 0.4700] [one_class_mean best 0.4700] \n", + "0.4700\t0.4700\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 120\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 120 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor120.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "e3eecf30", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0091\t0.0036\t0.0042\t0.0040\n", + "weight_shi:\t0.2410\t0.5432\t0.2487\t0.3103\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5940902277722075\n", + "CNMC 2.0988 +- 0.5314 q0: 1.0497 q10: 1.4954 q20: 1.6747 q30: 1.8162 q40: 1.9078 q50: 2.0314 q60: 2.1299 q70: 2.2698 q80: 2.4594 q90: 2.8458 q100: 4.3420\n", + "one_class_0 1.9324 +- 0.3714 q0: 1.0642 q10: 1.5287 q20: 1.6460 q30: 1.7249 q40: 1.8095 q50: 1.8731 q60: 1.9535 q70: 2.0458 q80: 2.1797 q90: 2.4138 q100: 3.6869\n", + "[one_class_0 CSI 0.5941] [one_class_0 best 0.5941] \n", + "[one_class_mean CSI 0.5941] [one_class_mean best 0.5941] \n", + "0.5941\t0.5941\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 128\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 128 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor128.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "d7d86bff", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0077\t0.0039\t0.0057\t0.0045\n", + "weight_shi:\t-0.0543\t0.1223\t0.1116\t0.1079\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4230414273995078\n", + "CNMC 1.9898 +- 0.1931 q0: 1.3874 q10: 1.7331 q20: 1.8325 q30: 1.9150 q40: 1.9682 q50: 2.0080 q60: 2.0511 q70: 2.1020 q80: 2.1509 q90: 2.2176 q100: 2.4975\n", + "one_class_0 2.0442 +- 0.1594 q0: 1.5034 q10: 1.8378 q20: 1.9120 q30: 1.9673 q40: 2.0118 q50: 2.0508 q60: 2.0920 q70: 2.1314 q80: 2.1747 q90: 2.2473 q100: 2.5530\n", + "[one_class_0 CSI 0.4230] [one_class_0 best 0.4230] \n", + "[one_class_mean CSI 0.4230] [one_class_mean best 0.4230] \n", + "0.4230\t0.4230\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 100\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 100 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor100.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "d60476b1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0071\t0.0035\t0.0055\t0.0033\n", + "weight_shi:\t-0.7731\t-0.4426\t3.0750\t-1.0296\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3861885880958892\n", + "CNMC 1.8504 +- 1.7281 q0: -3.9915 q10: -0.3957 q20: 0.3727 q30: 1.0483 q40: 1.6967 q50: 2.0760 q60: 2.4763 q70: 2.8382 q80: 3.3079 q90: 3.8465 q100: 5.6520\n", + "one_class_0 2.5429 +- 1.3399 q0: -4.5019 q10: 0.9296 q20: 1.4679 q30: 1.9042 q40: 2.2539 q50: 2.5979 q60: 2.9289 q70: 3.3053 q80: 3.6585 q90: 4.1959 q100: 6.6848\n", + "[one_class_0 CSI 0.3862] [one_class_0 best 0.3862] \n", + "[one_class_mean CSI 0.3862] [one_class_mean best 0.3862] \n", + "0.3862\t0.3862\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 80\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 80 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor80.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "b367669a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0083\t0.0112\t0.0076\t0.0136\n", + "weight_shi:\t-0.0567\t0.1140\t0.0842\t0.1028\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.376367240907848\n", + "CNMC 1.9968 +- 0.0768 q0: 1.7939 q10: 1.9005 q20: 1.9341 q30: 1.9569 q40: 1.9751 q50: 1.9937 q60: 2.0157 q70: 2.0367 q80: 2.0629 q90: 2.0964 q100: 2.2761\n", + "one_class_0 2.0289 +- 0.0677 q0: 1.8223 q10: 1.9439 q20: 1.9701 q30: 1.9928 q40: 2.0111 q50: 2.0279 q60: 2.0448 q70: 2.0625 q80: 2.0815 q90: 2.1167 q100: 2.3343\n", + "[one_class_0 CSI 0.3764] [one_class_0 best 0.3764] \n", + "[one_class_mean CSI 0.3764] [one_class_mean best 0.3764] \n", + "0.3764\t0.3764\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 64\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 64 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor64.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "id": "dce638a8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0150\t0.0058\t0.0129\t0.0054\n", + "weight_shi:\t-0.0982\t0.1165\t0.1059\t0.0929\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4102051874132815\n", + "CNMC 1.9836 +- 0.2025 q0: 1.4858 q10: 1.7023 q20: 1.8012 q30: 1.8770 q40: 1.9428 q50: 2.0013 q60: 2.0495 q70: 2.0936 q80: 2.1462 q90: 2.2425 q100: 2.5144\n", + "one_class_0 2.0499 +- 0.1863 q0: 1.5414 q10: 1.8119 q20: 1.8950 q30: 1.9488 q40: 1.9984 q50: 2.0487 q60: 2.0962 q70: 2.1512 q80: 2.2168 q90: 2.2846 q100: 2.6101\n", + "[one_class_0 CSI 0.4102] [one_class_0 best 0.4102] \n", + "[one_class_mean CSI 0.4102] [one_class_mean best 0.4102] \n", + "0.4102\t0.4102\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 32\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 32 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor32.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "id": "28387a64", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0088\t0.0070\t0.0079\t0.0070\n", + "weight_shi:\t-0.0517\t0.1752\t0.1985\t0.2796\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4567964532758079\n", + "CNMC 2.0252 +- 0.2349 q0: 1.3683 q10: 1.7184 q20: 1.8035 q30: 1.8906 q40: 1.9540 q50: 2.0153 q60: 2.0821 q70: 2.1619 q80: 2.2506 q90: 2.3384 q100: 2.6113\n", + "one_class_0 2.0617 +- 0.2072 q0: 1.5917 q10: 1.7983 q20: 1.8819 q30: 1.9423 q40: 1.9904 q50: 2.0477 q60: 2.1076 q70: 2.1646 q80: 2.2533 q90: 2.3431 q100: 2.6298\n", + "[one_class_0 CSI 0.4568] [one_class_0 best 0.4568] \n", + "[one_class_mean CSI 0.4568] [one_class_mean best 0.4568] \n", + "0.4568\t0.4568\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 16\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 16 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor16.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "id": "424cd4b8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0167\t0.0112\t0.0119\t0.0098\n", + "weight_shi:\t-0.1065\t0.1467\t0.1401\t0.1203\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4809307872269316\n", + "CNMC 2.0080 +- 0.2589 q0: 1.4062 q10: 1.6813 q20: 1.7879 q30: 1.8753 q40: 1.9309 q50: 1.9964 q60: 2.0601 q70: 2.1217 q80: 2.2165 q90: 2.3636 q100: 3.0258\n", + "one_class_0 2.0288 +- 0.2475 q0: 1.4597 q10: 1.7282 q20: 1.8162 q30: 1.8799 q40: 1.9422 q50: 1.9987 q60: 2.0671 q70: 2.1423 q80: 2.2336 q90: 2.3568 q100: 3.5008\n", + "[one_class_0 CSI 0.4809] [one_class_0 best 0.4809] \n", + "[one_class_mean CSI 0.4809] [one_class_mean best 0.4809] \n", + "0.4809\t0.4809\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 8 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor8.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "b30452ce", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0033\t0.0030\t0.0025\t0.0028\n", + "weight_shi:\t-0.0191\t0.0502\t0.0455\t0.0473\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4414099292073041\n", + "CNMC 1.9717 +- 0.1514 q0: 1.4512 q10: 1.7885 q20: 1.8443 q30: 1.8923 q40: 1.9275 q50: 1.9674 q60: 2.0006 q70: 2.0525 q80: 2.0992 q90: 2.1705 q100: 2.4112\n", + "one_class_0 1.9985 +- 0.1198 q0: 1.6117 q10: 1.8478 q20: 1.9013 q30: 1.9366 q40: 1.9671 q50: 1.9967 q60: 2.0257 q70: 2.0616 q80: 2.0994 q90: 2.1570 q100: 2.3699\n", + "[one_class_0 CSI 0.4414] [one_class_0 best 0.4414] \n", + "[one_class_mean CSI 0.4414] [one_class_mean best 0.4414] \n", + "0.4414\t0.4414\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 5\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 5 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor5.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "id": "61511c88", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0048\t0.0048\t0.0033\t0.0040\n", + "weight_shi:\t-0.0216\t0.0573\t0.0466\t0.0474\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3330541883146477\n", + "CNMC 1.9613 +- 0.1613 q0: 1.4001 q10: 1.7205 q20: 1.8317 q30: 1.8884 q40: 1.9420 q50: 1.9740 q60: 2.0204 q70: 2.0591 q80: 2.1023 q90: 2.1507 q100: 2.3497\n", + "one_class_0 2.0506 +- 0.1298 q0: 1.5709 q10: 1.8729 q20: 1.9474 q30: 2.0023 q40: 2.0388 q50: 2.0668 q60: 2.0970 q70: 2.1272 q80: 2.1610 q90: 2.1981 q100: 2.4918\n", + "[one_class_0 CSI 0.3331] [one_class_0 best 0.3331] \n", + "[one_class_mean CSI 0.3331] [one_class_mean best 0.3331] \n", + "0.3331\t0.3331\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 4\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 4 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor4.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "id": "9aa87298", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0025\t0.0027\t0.0023\t0.0026\n", + "weight_shi:\t-0.0247\t0.0743\t0.0809\t0.0786\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.32097499468295204\n", + "CNMC 1.9756 +- 0.0921 q0: 1.6810 q10: 1.8631 q20: 1.9010 q30: 1.9281 q40: 1.9493 q50: 1.9693 q60: 1.9932 q70: 2.0205 q80: 2.0561 q90: 2.0987 q100: 2.2881\n", + "one_class_0 2.0291 +- 0.0715 q0: 1.8113 q10: 1.9469 q20: 1.9690 q30: 1.9889 q40: 2.0060 q50: 2.0221 q60: 2.0372 q70: 2.0598 q80: 2.0847 q90: 2.1253 q100: 2.3137\n", + "[one_class_0 CSI 0.3210] [one_class_0 best 0.3210] \n", + "[one_class_mean CSI 0.3210] [one_class_mean best 0.3210] \n", + "0.3210\t0.3210\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor3.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "id": "ed261f4c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0016\t0.0017\t0.0018\t0.0018\n", + "weight_shi:\t-0.0191\t0.0634\t0.0692\t0.0681\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5479179959286604\n", + "CNMC 2.0439 +- 0.2491 q0: 1.3671 q10: 1.7149 q20: 1.8289 q30: 1.9114 q40: 1.9969 q50: 2.0563 q60: 2.1157 q70: 2.1758 q80: 2.2482 q90: 2.3551 q100: 2.6738\n", + "one_class_0 2.0114 +- 0.1906 q0: 1.4681 q10: 1.7626 q20: 1.8467 q30: 1.9083 q40: 1.9608 q50: 2.0043 q60: 2.0576 q70: 2.1069 q80: 2.1710 q90: 2.2558 q100: 2.5480\n", + "[one_class_0 CSI 0.5479] [one_class_0 best 0.5479] \n", + "[one_class_mean CSI 0.5479] [one_class_mean best 0.5479] \n", + "0.5479\t0.5479\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharpness : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --sharpness_factor 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor2.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "3f347111", + "metadata": {}, + "source": [ + "## randpers" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "id": "6954e9f3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0027\t0.0028\t0.0028\t0.0029\n", + "weight_shi:\t0.0396\t-0.1267\t-0.1178\t-0.1344\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.35701318627897793\n", + "CNMC 1.9933 +- 0.0426 q0: 1.7637 q10: 1.9470 q20: 1.9683 q30: 1.9825 q40: 1.9896 q50: 1.9981 q60: 2.0059 q70: 2.0119 q80: 2.0228 q90: 2.0391 q100: 2.1039\n", + "one_class_0 2.0107 +- 0.0300 q0: 1.8398 q10: 1.9753 q20: 1.9909 q30: 2.0006 q40: 2.0073 q50: 2.0134 q60: 2.0197 q70: 2.0245 q80: 2.0323 q90: 2.0429 q100: 2.0991\n", + "[one_class_0 CSI 0.3570] [one_class_0 best 0.3570] \n", + "[one_class_mean CSI 0.3570] [one_class_mean best 0.3570] \n", + "0.3570\t0.3570\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.95\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.95 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.95_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "id": "7ef390e9", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0079\t0.0098\t0.0115\t0.0104\n", + "weight_shi:\t-0.2285\t-6.8399\t0.4918\t0.3229\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5641122049038374\n", + "CNMC 1.9712 +- 0.8313 q0: -4.4253 q10: 0.9802 q20: 1.5045 q30: 1.8002 q40: 1.9917 q50: 2.1173 q60: 2.2353 q70: 2.3608 q80: 2.5048 q90: 2.7707 q100: 4.2407\n", + "one_class_0 1.9180 +- 0.6218 q0: -3.1624 q10: 1.2584 q20: 1.5541 q30: 1.7231 q40: 1.8312 q50: 1.9474 q60: 2.0646 q70: 2.2017 q80: 2.3455 q90: 2.6130 q100: 4.2616\n", + "[one_class_0 CSI 0.5641] [one_class_0 best 0.5641] \n", + "[one_class_mean CSI 0.5641] [one_class_mean best 0.5641] \n", + "0.5641\t0.5641\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.9\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.9 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.9_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "id": "1205e882", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0034\t0.0047\t0.0029\t0.0041\n", + "weight_shi:\t0.1303\t-0.3875\t-0.1777\t-0.3820\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.34714879632161555\n", + "CNMC 1.9226 +- 0.3041 q0: 0.5031 q10: 1.4958 q20: 1.7161 q30: 1.8443 q40: 1.9245 q50: 1.9879 q60: 2.0373 q70: 2.0889 q80: 2.1566 q90: 2.2287 q100: 2.5521\n", + "one_class_0 2.0685 +- 0.2043 q0: 1.1682 q10: 1.8203 q20: 1.9473 q30: 2.0089 q40: 2.0513 q50: 2.0872 q60: 2.1284 q70: 2.1701 q80: 2.2162 q90: 2.2834 q100: 2.6224\n", + "[one_class_0 CSI 0.3471] [one_class_0 best 0.3471] \n", + "[one_class_mean CSI 0.3471] [one_class_mean best 0.3471] \n", + "0.3471\t0.3471\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.85\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.85 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.85_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "id": "8887546c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0020\t0.0037\t0.0026\t0.0039\n", + "weight_shi:\t0.1393\t2.5299\t-1.4218\t1.2437\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6913884078226435\n", + "CNMC 2.0752 +- 0.3123 q0: 0.7320 q10: 1.7083 q20: 1.8297 q30: 1.9220 q40: 2.0047 q50: 2.0690 q60: 2.1391 q70: 2.2162 q80: 2.3010 q90: 2.4549 q100: 3.2842\n", + "one_class_0 1.8917 +- 0.2289 q0: 0.7422 q10: 1.6150 q20: 1.7197 q30: 1.7818 q40: 1.8380 q50: 1.8923 q60: 1.9400 q70: 1.9926 q80: 2.0616 q90: 2.1731 q100: 2.9070\n", + "[one_class_0 CSI 0.6914] [one_class_0 best 0.6914] \n", + "[one_class_mean CSI 0.6914] [one_class_mean best 0.6914] \n", + "0.6914\t0.6914\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.8 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.8_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "id": "b65d2295", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0080\t0.0036\t0.0038\t0.0054\n", + "weight_shi:\t-0.0669\t-0.5647\t-0.7888\t0.5885\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.35124039133473095\n", + "CNMC 1.9592 +- 0.1769 q0: 1.1540 q10: 1.7506 q20: 1.8287 q30: 1.8809 q40: 1.9227 q50: 1.9622 q60: 1.9930 q70: 2.0505 q80: 2.0959 q90: 2.1715 q100: 2.7656\n", + "one_class_0 2.0376 +- 0.1423 q0: 1.0198 q10: 1.8838 q20: 1.9409 q30: 1.9806 q40: 2.0144 q50: 2.0407 q60: 2.0708 q70: 2.0989 q80: 2.1348 q90: 2.1967 q100: 2.6480\n", + "[one_class_0 CSI 0.3512] [one_class_0 best 0.3512] \n", + "[one_class_mean CSI 0.3512] [one_class_mean best 0.3512] \n", + "0.3512\t0.3512\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.75 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.75_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "id": "2a818378", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0034\t0.0037\t0.0024\t0.0028\n", + "weight_shi:\t0.5181\t-2.5612\t-0.2828\t-0.4473\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3594818156959256\n", + "CNMC 1.5977 +- 1.3081 q0: -2.9399 q10: -0.2131 q20: 0.5889 q30: 1.0500 q40: 1.4828 q50: 1.7567 q60: 2.0337 q70: 2.3642 q80: 2.6741 q90: 3.1919 q100: 4.5132\n", + "one_class_0 2.2261 +- 1.0824 q0: -1.7685 q10: 0.7646 q20: 1.4013 q30: 1.7643 q40: 2.0621 q50: 2.3193 q60: 2.5929 q70: 2.8838 q80: 3.1160 q90: 3.5429 q100: 5.0474\n", + "[one_class_0 CSI 0.3595] [one_class_0 best 0.3595] \n", + "[one_class_mean CSI 0.3595] [one_class_mean best 0.3595] \n", + "0.3595\t0.3595\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.6\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.6 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.6_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "id": "09a15dda", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0043\t0.0115\t0.0075\t0.0087\n", + "weight_shi:\t12.1609\t0.3968\t2.0101\t0.4812\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.36039204367068733\n", + "CNMC 1.4531 +- 7.5510 q0: -28.7074 q10: -8.1564 q20: -3.7419 q30: -1.3692 q40: 0.5754 q50: 2.0721 q60: 3.7007 q70: 5.1638 q80: 7.3805 q90: 10.4019 q100: 20.0002\n", + "one_class_0 4.8084 +- 5.1144 q0: -14.2655 q10: -1.2285 q20: 1.1701 q30: 2.5734 q40: 3.7262 q50: 4.8972 q60: 5.9430 q70: 7.0902 q80: 8.7584 q90: 11.3302 q100: 19.8412\n", + "[one_class_0 CSI 0.3604] [one_class_0 best 0.3604] \n", + "[one_class_mean CSI 0.3604] [one_class_mean best 0.3604] \n", + "0.3604\t0.3604\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --distortion_scale 0.3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.3_one_class_1/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "47013663", + "metadata": {}, + "source": [ + "## blur" + ] + }, + { + "cell_type": "code", + "execution_count": 134, + "id": "958ecba3", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0038\t0.0072\t0.0039\t0.0044\n", + "weight_shi:\t-0.1658\t0.1714\t0.2799\t0.2838\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4812004375170905\n", + "CNMC 1.9384 +- 0.2410 q0: 1.5304 q10: 1.7013 q20: 1.7508 q30: 1.7894 q40: 1.8299 q50: 1.8827 q60: 1.9243 q70: 1.9999 q80: 2.0841 q90: 2.2637 q100: 2.8485\n", + "one_class_0 1.9219 +- 0.1651 q0: 1.5451 q10: 1.7386 q20: 1.7816 q30: 1.8174 q40: 1.8548 q50: 1.8945 q60: 1.9407 q70: 1.9846 q80: 2.0549 q90: 2.1446 q100: 2.6371\n", + "[one_class_0 CSI 0.4812] [one_class_0 best 0.4812] \n", + "[one_class_mean CSI 0.4812] [one_class_mean best 0.4812] \n", + "0.4812\t0.4812\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 180\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 180 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma180.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 135, + "id": "a3f7ef72", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0025\t0.0058\t0.0024\t0.0029\n", + "weight_shi:\t-0.0568\t0.0831\t0.1701\t0.1303\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6192537902956279\n", + "CNMC 2.0247 +- 0.0991 q0: 1.8346 q10: 1.9155 q20: 1.9404 q30: 1.9652 q40: 1.9879 q50: 2.0067 q60: 2.0291 q70: 2.0625 q80: 2.1019 q90: 2.1563 q100: 2.4786\n", + "one_class_0 1.9853 +- 0.0765 q0: 1.7917 q10: 1.9064 q20: 1.9276 q30: 1.9429 q40: 1.9598 q50: 1.9743 q60: 1.9887 q70: 2.0055 q80: 2.0343 q90: 2.0845 q100: 2.3701\n", + "[one_class_0 CSI 0.6193] [one_class_0 best 0.6193] \n", + "[one_class_mean CSI 0.6193] [one_class_mean best 0.6193] \n", + "0.6193\t0.6193\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 120\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 120 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma120.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 147, + "id": "2f2a8808", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0030\t0.0043\t0.0026\t0.0028\n", + "weight_shi:\t-0.0889\t0.1756\t0.3138\t0.2610\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5952460527248604\n", + "CNMC 2.0008 +- 0.0139 q0: 1.9745 q10: 1.9866 q20: 1.9900 q30: 1.9930 q40: 1.9955 q50: 1.9986 q60: 2.0019 q70: 2.0048 q80: 2.0099 q90: 2.0166 q100: 2.0896\n", + "one_class_0 1.9964 +- 0.0119 q0: 1.9575 q10: 1.9833 q20: 1.9872 q30: 1.9899 q40: 1.9925 q50: 1.9948 q60: 1.9973 q70: 2.0007 q80: 2.0051 q90: 2.0119 q100: 2.0732\n", + "[one_class_0 CSI 0.5952] [one_class_0 best 0.5952] \n", + "[one_class_mean CSI 0.5952] [one_class_mean best 0.5952] \n", + "0.5952\t0.5952\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 110\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 110 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma110.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 136, + "id": "08a6959c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0067\t0.0043\t0.0047\t0.0049\n", + "weight_shi:\t-0.0583\t0.0915\t0.2051\t0.1700\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.633952896018797\n", + "CNMC 1.9904 +- 0.1692 q0: 1.5494 q10: 1.7849 q20: 1.8442 q30: 1.9005 q40: 1.9430 q50: 1.9786 q60: 2.0203 q70: 2.0602 q80: 2.1209 q90: 2.2064 q100: 2.5511\n", + "one_class_0 1.9167 +- 0.1098 q0: 1.5787 q10: 1.7796 q20: 1.8329 q30: 1.8646 q40: 1.8922 q50: 1.9150 q60: 1.9402 q70: 1.9662 q80: 1.9991 q90: 2.0546 q100: 2.2965\n", + "[one_class_0 CSI 0.6340] [one_class_0 best 0.6340] \n", + "[one_class_mean CSI 0.6340] [one_class_mean best 0.6340] \n", + "0.6340\t0.6340\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 105\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 105 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma105.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 137, + "id": "a4a4eee1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0091\t0.0045\t0.0091\t0.0070\n", + "weight_shi:\t-0.0676\t0.0975\t0.1849\t0.1972\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5476483456385015\n", + "CNMC 1.9769 +- 0.2661 q0: 1.3171 q10: 1.6312 q20: 1.7272 q30: 1.8319 q40: 1.9118 q50: 1.9776 q60: 2.0460 q70: 2.1125 q80: 2.1924 q90: 2.3278 q100: 2.8173\n", + "one_class_0 1.9268 +- 0.2241 q0: 1.2928 q10: 1.6165 q20: 1.7138 q30: 1.8152 q40: 1.8859 q50: 1.9465 q60: 2.0048 q70: 2.0580 q80: 2.1179 q90: 2.1973 q100: 2.5704\n", + "[one_class_0 CSI 0.5476] [one_class_0 best 0.5476] \n", + "[one_class_mean CSI 0.5476] [one_class_mean best 0.5476] \n", + "0.5476\t0.5476\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 100\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 100 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma100.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 138, + "id": "8f0ceb15", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0018\t0.0028\t0.0016\t0.0018\n", + "weight_shi:\t-0.2029\t0.1970\t1.0597\t0.4185\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5393334953767002\n", + "CNMC 1.9749 +- 0.3046 q0: 1.1986 q10: 1.5960 q20: 1.7089 q30: 1.8060 q40: 1.8905 q50: 1.9696 q60: 2.0356 q70: 2.1233 q80: 2.2217 q90: 2.3752 q100: 3.1061\n", + "one_class_0 1.9275 +- 0.2387 q0: 1.1897 q10: 1.6222 q20: 1.7266 q30: 1.8084 q40: 1.8732 q50: 1.9343 q60: 1.9958 q70: 2.0464 q80: 2.1172 q90: 2.2327 q100: 2.6893\n", + "[one_class_0 CSI 0.5393] [one_class_0 best 0.5393] \n", + "[one_class_mean CSI 0.5393] [one_class_mean best 0.5393] \n", + "0.5393\t0.5393\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 95\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 95 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma95.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 139, + "id": "7d89e279", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0075\t0.0041\t0.0071\t0.0059\n", + "weight_shi:\t-0.0360\t0.0714\t0.1079\t0.0991\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5292488277175179\n", + "CNMC 1.9774 +- 0.1920 q0: 1.5068 q10: 1.7357 q20: 1.8106 q30: 1.8745 q40: 1.9206 q50: 1.9753 q60: 2.0197 q70: 2.0694 q80: 2.1299 q90: 2.2192 q100: 2.6272\n", + "one_class_0 1.9507 +- 0.1545 q0: 1.4789 q10: 1.7436 q20: 1.8103 q30: 1.8750 q40: 1.9239 q50: 1.9683 q60: 2.0050 q70: 2.0411 q80: 2.0780 q90: 2.1379 q100: 2.3968\n", + "[one_class_0 CSI 0.5292] [one_class_0 best 0.5292] \n", + "[one_class_mean CSI 0.5292] [one_class_mean best 0.5292] \n", + "0.5292\t0.5292\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 90\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 90 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma90.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 140, + "id": "ebb47e6b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0050\t0.0117\t0.0038\t0.0049\n", + "weight_shi:\t-0.2427\t0.2328\t1.3692\t0.7248\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.557083573866456\n", + "CNMC 2.0005 +- 0.1926 q0: 1.3343 q10: 1.8135 q20: 1.8631 q30: 1.9060 q40: 1.9410 q50: 1.9699 q60: 2.0110 q70: 2.0589 q80: 2.1204 q90: 2.2186 q100: 3.1284\n", + "one_class_0 1.9634 +- 0.1487 q0: 1.4064 q10: 1.8025 q20: 1.8544 q30: 1.8886 q40: 1.9192 q50: 1.9463 q60: 1.9749 q70: 2.0096 q80: 2.0594 q90: 2.1522 q100: 2.5877\n", + "[one_class_0 CSI 0.5571] [one_class_0 best 0.5571] \n", + "[one_class_mean CSI 0.5571] [one_class_mean best 0.5571] \n", + "0.5571\t0.5571\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 80\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 80 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma80.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 141, + "id": "7d6e0050", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0062\t0.0053\t0.0066\t0.0062\n", + "weight_shi:\t-0.0434\t0.0771\t0.1221\t0.1065\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5821104122990916\n", + "CNMC 1.9984 +- 0.0869 q0: 1.7841 q10: 1.8832 q20: 1.9216 q30: 1.9505 q40: 1.9768 q50: 1.9991 q60: 2.0230 q70: 2.0443 q80: 2.0710 q90: 2.1126 q100: 2.2334\n", + "one_class_0 1.9740 +- 0.0685 q0: 1.7594 q10: 1.8780 q20: 1.9143 q30: 1.9428 q40: 1.9641 q50: 1.9808 q60: 1.9973 q70: 2.0131 q80: 2.0305 q90: 2.0551 q100: 2.1770\n", + "[one_class_0 CSI 0.5821] [one_class_0 best 0.5821] \n", + "[one_class_mean CSI 0.5821] [one_class_mean best 0.5821] \n", + "0.5821\t0.5821\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 60\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 60 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma60.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 142, + "id": "df7becce", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0045\t0.0051\t0.0033\t0.0041\n", + "weight_shi:\t-0.1512\t0.2745\t0.6510\t0.4026\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6940311072625811\n", + "CNMC 2.0096 +- 0.0626 q0: 1.8488 q10: 1.9389 q20: 1.9597 q30: 1.9741 q40: 1.9884 q50: 2.0031 q60: 2.0185 q70: 2.0342 q80: 2.0520 q90: 2.0853 q100: 2.2770\n", + "one_class_0 1.9718 +- 0.0446 q0: 1.8450 q10: 1.9219 q20: 1.9383 q30: 1.9495 q40: 1.9584 q50: 1.9680 q60: 1.9769 q70: 1.9880 q80: 2.0015 q90: 2.0244 q100: 2.2023\n", + "[one_class_0 CSI 0.6940] [one_class_0 best 0.6940] \n", + "[one_class_mean CSI 0.6940] [one_class_mean best 0.6940] \n", + "0.6940\t0.6940\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma40.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 143, + "id": "b7036b42", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0017\t0.0020\t0.0015\t0.0016\n", + "weight_shi:\t0.0317\t-0.1164\t-0.0840\t-0.0812\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.39877986408612603\n", + "CNMC 1.9759 +- 0.1316 q0: 1.4471 q10: 1.8010 q20: 1.8983 q30: 1.9382 q40: 1.9698 q50: 1.9932 q60: 2.0241 q70: 2.0528 q80: 2.0799 q90: 2.1197 q100: 2.2278\n", + "one_class_0 2.0210 +- 0.0942 q0: 1.5614 q10: 1.8968 q20: 1.9555 q30: 1.9874 q40: 2.0148 q50: 2.0364 q60: 2.0551 q70: 2.0753 q80: 2.0963 q90: 2.1246 q100: 2.2320\n", + "[one_class_0 CSI 0.3988] [one_class_0 best 0.3988] \n", + "[one_class_mean CSI 0.3988] [one_class_mean best 0.3988] \n", + "0.3988\t0.3988\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 20\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 20 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma20.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 144, + "id": "e7b68654", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0020\t0.0035\t0.0027\t0.0027\n", + "weight_shi:\t0.1013\t-0.5641\t-0.5419\t-0.3880\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.33394542683235595\n", + "CNMC 1.9739 +- 0.2803 q0: 1.2318 q10: 1.5933 q20: 1.7359 q30: 1.8196 q40: 1.9141 q50: 1.9863 q60: 2.0640 q70: 2.1425 q80: 2.2339 q90: 2.3203 q100: 2.6037\n", + "one_class_0 2.1309 +- 0.1875 q0: 1.4830 q10: 1.8910 q20: 1.9714 q30: 2.0347 q40: 2.0807 q50: 2.1311 q60: 2.1754 q70: 2.2372 q80: 2.3011 q90: 2.3743 q100: 2.6831\n", + "[one_class_0 CSI 0.3339] [one_class_0 best 0.3339] \n", + "[one_class_mean CSI 0.3339] [one_class_mean best 0.3339] \n", + "0.3339\t0.3339\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# shift_tr : blur\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# res : 450px\n", + "# crop : 0.08\n", + "# blur_sigma : 6\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 6 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma6.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 145, + "id": "5a20ddb8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0029\t0.0062\t0.0034\t0.0030\n", + "weight_shi:\t0.2169\t2.1291\t-0.6997\t-0.6317\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5895074388033098\n", + "CNMC 2.0596 +- 0.2877 q0: 1.0222 q10: 1.6929 q20: 1.8375 q30: 1.9167 q40: 2.0096 q50: 2.0851 q60: 2.1435 q70: 2.2065 q80: 2.2951 q90: 2.4044 q100: 3.0604\n", + "one_class_0 1.9839 +- 0.2326 q0: 1.1067 q10: 1.6760 q20: 1.7908 q30: 1.8756 q40: 1.9429 q50: 2.0082 q60: 2.0584 q70: 2.1101 q80: 2.1732 q90: 2.2584 q100: 3.0116\n", + "[one_class_0 CSI 0.5895] [one_class_0 best 0.5895] \n", + "[one_class_mean CSI 0.5895] [one_class_mean best 0.5895] \n", + "0.5895\t0.5895\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 4\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 4 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma4.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 146, + "id": "f014e06d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0047\t0.0065\t0.0046\t0.0045\n", + "weight_shi:\t0.2645\t-12.1918\t-1.1354\t-0.9111\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.43248488439218546\n", + "CNMC 1.5307 +- 2.2981 q0: -6.2860 q10: -1.3964 q20: -0.1077 q30: 0.5774 q40: 1.2354 q50: 1.7091 q60: 2.1039 q70: 2.7068 q80: 3.3782 q90: 4.4476 q100: 6.9377\n", + "one_class_0 2.0424 +- 1.5916 q0: -5.1678 q10: 0.0924 q20: 0.8505 q30: 1.3834 q40: 1.7445 q50: 2.1476 q60: 2.4484 q70: 2.8574 q80: 3.3216 q90: 3.9483 q100: 6.4052\n", + "[one_class_0 CSI 0.4325] [one_class_0 best 0.4325] \n", + "[one_class_mean CSI 0.4325] [one_class_mean best 0.4325] \n", + "0.4325\t0.4325\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma3.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 148, + "id": "469197e2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0083\t0.0100\t0.0115\t0.0075\n", + "weight_shi:\t2.4798\t0.7962\t-4.3631\t-2.5771\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.37173128145920054\n", + "CNMC 1.8398 +- 0.7107 q0: -1.6740 q10: 0.9741 q20: 1.3853 q30: 1.6153 q40: 1.8033 q50: 1.9486 q60: 2.1053 q70: 2.2304 q80: 2.3601 q90: 2.5741 q100: 3.5645\n", + "one_class_0 2.1409 +- 0.5323 q0: -0.7020 q10: 1.4665 q20: 1.7704 q30: 1.9274 q40: 2.0616 q50: 2.1799 q60: 2.2918 q70: 2.4188 q80: 2.5430 q90: 2.7544 q100: 3.8279\n", + "[one_class_0 CSI 0.3717] [one_class_0 best 0.3717] \n", + "[one_class_mean CSI 0.3717] [one_class_mean best 0.3717] \n", + "0.3717\t0.3717\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma2.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 149, + "id": "b8ccee0f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0078\t0.0116\t0.0095\t0.0106\n", + "weight_shi:\t0.1768\t-0.5198\t-0.4439\t-0.3696\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3392225969475081\n", + "CNMC 1.9808 +- 0.1438 q0: 1.5735 q10: 1.8230 q20: 1.8637 q30: 1.8908 q40: 1.9230 q50: 1.9577 q60: 1.9928 q70: 2.0435 q80: 2.1120 q90: 2.1900 q100: 2.4139\n", + "one_class_0 2.0502 +- 0.1152 q0: 1.7554 q10: 1.9134 q20: 1.9501 q30: 1.9799 q40: 2.0064 q50: 2.0376 q60: 2.0668 q70: 2.1043 q80: 2.1520 q90: 2.2170 q100: 2.4357\n", + "[one_class_0 CSI 0.3392] [one_class_0 best 0.3392] \n", + "[one_class_mean CSI 0.3392] [one_class_mean best 0.3392] \n", + "0.3392\t0.3392\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.5\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 1.5 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma1.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 150, + "id": "3ba56d85", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0021\t0.0031\t0.0026\t0.0026\n", + "weight_shi:\t0.3756\t9.2614\t-0.9536\t-0.8326\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6796440616169901\n", + "CNMC 2.2897 +- 0.8188 q0: -0.1330 q10: 1.2176 q20: 1.6025 q30: 1.8640 q40: 2.0833 q50: 2.2961 q60: 2.5135 q70: 2.7109 q80: 2.9502 q90: 3.3024 q100: 4.9905\n", + "one_class_0 1.8212 +- 0.6476 q0: -0.7580 q10: 1.0215 q20: 1.3188 q30: 1.4821 q40: 1.6494 q50: 1.7908 q60: 1.9457 q70: 2.1147 q80: 2.3124 q90: 2.6421 q100: 4.3585\n", + "[one_class_0 CSI 0.6796] [one_class_0 best 0.6796] \n", + "[one_class_mean CSI 0.6796] [one_class_mean best 0.6796] \n", + "0.6796\t0.6796\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 1 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma1.0_one_class_1/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "9fd03e0e", + "metadata": {}, + "source": [ + "## other transformations" + ] + }, + { + "cell_type": "code", + "execution_count": 151, + "id": "beda234d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0022\t0.0048\t0.0029\t0.0028\n", + "weight_shi:\t-3.2909\t-2.8657\t12.5482\t8.7034\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5285322922047013\n", + "CNMC 2.0914 +- 0.5227 q0: 0.3459 q10: 1.3730 q20: 1.6394 q30: 1.8216 q40: 1.9820 q50: 2.1814 q60: 2.2788 q70: 2.3999 q80: 2.5247 q90: 2.7229 q100: 3.6842\n", + "one_class_0 2.0687 +- 0.3844 q0: 0.9708 q10: 1.5495 q20: 1.7411 q30: 1.8523 q40: 1.9633 q50: 2.0755 q60: 2.1823 q70: 2.2948 q80: 2.4057 q90: 2.5591 q100: 3.3615\n", + "[one_class_0 CSI 0.5285] [one_class_0 best 0.5285] \n", + "[one_class_mean CSI 0.5285] [one_class_mean best 0.5285] \n", + "0.5285\t0.5285\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : rotation\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type rotation --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_rotation_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 152, + "id": "025aedc5", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3581 3581 3581 3581\n", + "weight_sim:\t0.0030\t0.0034\t0.0036\t0.0040\n", + "weight_shi:\t-0.0433\t0.5499\t-0.7289\t0.1057\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4115717953392276\n", + "CNMC 2.0156 +- 0.1922 q0: 1.5401 q10: 1.7668 q20: 1.8500 q30: 1.9196 q40: 1.9652 q50: 2.0074 q60: 2.0619 q70: 2.1144 q80: 2.1791 q90: 2.2713 q100: 2.6170\n", + "one_class_0 2.0726 +- 0.1608 q0: 1.6384 q10: 1.8757 q20: 1.9493 q30: 1.9876 q40: 2.0244 q50: 2.0556 q60: 2.0929 q70: 2.1446 q80: 2.2026 q90: 2.3007 q100: 2.7631\n", + "[one_class_0 CSI 0.4116] [one_class_0 best 0.4116] \n", + "[one_class_mean CSI 0.4116] [one_class_mean best 0.4116] \n", + "0.4116\t0.4116\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : cutperm\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type cutperm --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 1 --load_path \"logs/id_hem/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_cutperm_resize_factor0.08_color_dist0.5_one_class_1/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "7def804c", + "metadata": {}, + "source": [ + "# In-Distribution = ALL" + ] + }, + { + "cell_type": "markdown", + "id": "4d826eb3", + "metadata": {}, + "source": [ + "# Combined shiftings" + ] + }, + { + "cell_type": "code", + "execution_count": 153, + "id": "ed1501cd", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0099\t0.0102\t0.0065\t0.0102\n", + "weight_shi:\t-0.2651\t0.3988\t-0.4352\t-0.9217\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.39018776775134445\n", + "CNMC 1.9696 +- 0.3172 q0: 1.0059 q10: 1.5824 q20: 1.7007 q30: 1.7852 q40: 1.8767 q50: 1.9557 q60: 2.0335 q70: 2.1270 q80: 2.2281 q90: 2.3851 q100: 3.0697\n", + "one_class_1 2.1059 +- 0.3583 q0: 1.1658 q10: 1.6595 q20: 1.8070 q30: 1.9152 q40: 2.0053 q50: 2.0849 q60: 2.1740 q70: 2.2650 q80: 2.3979 q90: 2.5818 q100: 3.4632\n", + "[one_class_1 CSI 0.3902] [one_class_1 best 0.3902] \n", + "[one_class_mean CSI 0.3902] [one_class_mean best 0.3902] \n", + "0.3902\t0.3902\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --distortion_scale 0.75 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 154, + "id": "b471436b", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0067\t0.0129\t0.0065\t0.0086\n", + "weight_shi:\t-0.0850\t0.2249\t0.1729\t0.1702\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4909736780805963\n", + "CNMC 2.1838 +- 0.3670 q0: 0.9506 q10: 1.6677 q20: 1.8649 q30: 2.0015 q40: 2.1242 q50: 2.2193 q60: 2.3160 q70: 2.4125 q80: 2.5084 q90: 2.6376 q100: 3.1795\n", + "one_class_1 2.1670 +- 0.4888 q0: 0.7892 q10: 1.4646 q20: 1.7498 q30: 1.9466 q40: 2.1070 q50: 2.2393 q60: 2.3641 q70: 2.4747 q80: 2.6032 q90: 2.7386 q100: 3.1321\n", + "[one_class_1 CSI 0.4910] [one_class_1 best 0.4910] \n", + "[one_class_mean CSI 0.4910] [one_class_mean best 0.4910] \n", + "0.4910\t0.4910\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --sharpness_factor 5 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 155, + "id": "5c08667d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0058\t0.0071\t0.0060\t0.0060\n", + "weight_shi:\t-0.0229\t0.0795\t0.0649\t0.0666\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.45186742320663564\n", + "CNMC 2.0290 +- 0.0922 q0: 1.7841 q10: 1.9115 q20: 1.9486 q30: 1.9769 q40: 2.0011 q50: 2.0200 q60: 2.0479 q70: 2.0788 q80: 2.1081 q90: 2.1556 q100: 2.4408\n", + "one_class_1 2.0462 +- 0.1034 q0: 1.7679 q10: 1.9185 q20: 1.9546 q30: 1.9876 q40: 2.0171 q50: 2.0410 q60: 2.0679 q70: 2.0985 q80: 2.1328 q90: 2.1914 q100: 2.3683\n", + "[one_class_1 CSI 0.4519] [one_class_1 best 0.4519] \n", + "[one_class_mean CSI 0.4519] [one_class_mean best 0.4519] \n", + "0.4519\t0.4519\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --sharpness_factor 5 --distortion_scale 0.75 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_sharp_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 156, + "id": "e1be886d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0069\t0.0188\t0.0166\t0.0120\n", + "weight_shi:\t-0.1581\t0.1971\t0.2342\t0.3190\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.32064141322071316\n", + "CNMC 1.9454 +- 0.0810 q0: 1.7576 q10: 1.8630 q20: 1.8860 q30: 1.9012 q40: 1.9140 q50: 1.9316 q60: 1.9461 q70: 1.9640 q80: 1.9905 q90: 2.0476 q100: 2.4165\n", + "one_class_1 2.0265 +- 0.1592 q0: 1.7834 q10: 1.8887 q20: 1.9115 q30: 1.9346 q40: 1.9614 q50: 1.9884 q60: 2.0114 q70: 2.0559 q80: 2.1059 q90: 2.2091 q100: 3.1080\n", + "[one_class_1 CSI 0.3206] [one_class_1 best 0.3206] \n", + "[one_class_mean CSI 0.3206] [one_class_mean best 0.3206] \n", + "0.3206\t0.3206\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers_sharp\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --sharpness_factor 5 --distortion_scale 0.75 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur_randpers_sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_sharp_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "d8cd9c5a", + "metadata": {}, + "source": [ + "# Rotation" + ] + }, + { + "cell_type": "code", + "execution_count": 157, + "id": "3f9748c5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0058\t0.0091\t0.0059\t0.0061\n", + "weight_shi:\t-20.2520\t5.6794\t4.4756\t-13.8486\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6155293247855458\n", + "CNMC 2.2941 +- 0.4851 q0: 0.6171 q10: 1.6977 q20: 1.9170 q30: 2.0537 q40: 2.1467 q50: 2.2525 q60: 2.3680 q70: 2.5004 q80: 2.6952 q90: 2.9333 q100: 4.0615\n", + "one_class_1 2.0566 +- 0.6054 q0: 0.2141 q10: 1.2573 q20: 1.5313 q30: 1.7431 q40: 1.8966 q50: 2.0426 q60: 2.2221 q70: 2.3685 q80: 2.6045 q90: 2.8399 q100: 3.9073\n", + "[one_class_1 CSI 0.6155] [one_class_1 best 0.6155] \n", + "[one_class_mean CSI 0.6155] [one_class_mean best 0.6155] \n", + "0.6155\t0.6155\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : rotation\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type rotation --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_rotation_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "ed7a3ca6", + "metadata": {}, + "source": [ + "# Cutperm" + ] + }, + { + "cell_type": "code", + "execution_count": 158, + "id": "47382eef", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0033\t0.0040\t0.0048\t0.0059\n", + "weight_shi:\t-0.0422\t-0.2956\t0.3071\t0.0913\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5637665967854647\n", + "CNMC 2.1340 +- 0.2713 q0: 1.5092 q10: 1.8054 q20: 1.8955 q30: 1.9635 q40: 2.0281 q50: 2.0900 q60: 2.1689 q70: 2.2729 q80: 2.3753 q90: 2.5306 q100: 2.8713\n", + "one_class_1 2.0681 +- 0.3216 q0: 1.3818 q10: 1.6678 q20: 1.7728 q30: 1.8582 q40: 1.9368 q50: 2.0391 q60: 2.1288 q70: 2.2616 q80: 2.3884 q90: 2.5307 q100: 2.8915\n", + "[one_class_1 CSI 0.5638] [one_class_1 best 0.5638] \n", + "[one_class_mean CSI 0.5638] [one_class_mean best 0.5638] \n", + "0.5638\t0.5638\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : cutperm\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type cutperm --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_cutperm_resize_factor0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "e338538b", + "metadata": {}, + "source": [ + "# Rotated Dataset 4" + ] + }, + { + "cell_type": "code", + "execution_count": 69, + "id": "18aa1694", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0089\t0.0071\t0.0082\t0.0060\n", + "weight_shi:\t-0.0826\t0.1155\t0.1144\t0.1138\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6829627857280305\n", + "CNMC 2.0823 +- 0.1590 q0: 1.4861 q10: 1.8867 q20: 1.9512 q30: 1.9962 q40: 2.0434 q50: 2.0844 q60: 2.1284 q70: 2.1653 q80: 2.2150 q90: 2.2784 q100: 2.7066\n", + "one_class_1 1.9798 +- 0.1471 q0: 1.4589 q10: 1.7996 q20: 1.8601 q30: 1.9145 q40: 1.9503 q50: 1.9828 q60: 2.0164 q70: 2.0541 q80: 2.1007 q90: 2.1670 q100: 2.3931\n", + "[one_class_1 CSI 0.6830] [one_class_1 best 0.6830] \n", + "[one_class_mean CSI 0.6830] [one_class_mean best 0.6830] \n", + "0.6830\t0.6830\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC_ROT4\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 64\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 64 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/dataset_rotated_4/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor64.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 70, + "id": "95e84b59", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0076\t0.0081\t0.0080\t0.0086\n", + "weight_shi:\t-0.1382\t1.2588\t2.1567\t0.5287\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3485907290938738\n", + "CNMC 1.8551 +- 0.4346 q0: 0.8263 q10: 1.3471 q20: 1.5032 q30: 1.6156 q40: 1.7119 q50: 1.8134 q60: 1.9097 q70: 2.0306 q80: 2.1600 q90: 2.4047 q100: 4.4743\n", + "one_class_1 2.1133 +- 0.5033 q0: 0.9826 q10: 1.5132 q20: 1.7004 q30: 1.8184 q40: 1.9124 q50: 2.0310 q60: 2.1594 q70: 2.3078 q80: 2.5394 q90: 2.7696 q100: 4.0888\n", + "[one_class_1 CSI 0.3486] [one_class_1 best 0.3486] \n", + "[one_class_mean CSI 0.3486] [one_class_mean best 0.3486] \n", + "0.3486\t0.3486\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC_ROT4\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randpers : 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.75 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/dataset_rotated_4/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.75_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 159, + "id": "982cf5a4", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0137\t0.0152\t0.0140\t0.0126\n", + "weight_shi:\t-0.1440\t0.3135\t0.4775\t0.4211\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3167783246741409\n", + "CNMC 1.9366 +- 0.1773 q0: 1.4437 q10: 1.7285 q20: 1.7909 q30: 1.8431 q40: 1.8862 q50: 1.9192 q60: 1.9569 q70: 2.0073 q80: 2.0694 q90: 2.1765 q100: 2.5655\n", + "one_class_1 2.0674 +- 0.2071 q0: 1.6099 q10: 1.8331 q20: 1.8954 q30: 1.9420 q40: 1.9859 q50: 2.0320 q60: 2.0882 q70: 2.1432 q80: 2.2308 q90: 2.3727 q100: 2.8015\n", + "[one_class_1 CSI 0.3168] [one_class_1 best 0.3168] \n", + "[one_class_mean CSI 0.3168] [one_class_mean best 0.3168] \n", + "0.3168\t0.3168\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC_ROT4\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/dataset_rotated_4/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "c9c8f555", + "metadata": {}, + "source": [ + "# Sharpness Factor" + ] + }, + { + "cell_type": "code", + "execution_count": 72, + "id": "ac35a164", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0050\t0.0011\t0.0010\t0.0009\n", + "weight_shi:\t-0.0433\t0.0726\t0.2303\t0.0769\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5375965930382118\n", + "CNMC 1.9981 +- 0.1008 q0: 1.5969 q10: 1.8641 q20: 1.9163 q30: 1.9503 q40: 1.9776 q50: 2.0056 q60: 2.0329 q70: 2.0563 q80: 2.0814 q90: 2.1226 q100: 2.2687\n", + "one_class_1 1.9867 +- 0.1056 q0: 1.6492 q10: 1.8484 q20: 1.8943 q30: 1.9323 q40: 1.9603 q50: 1.9909 q60: 2.0131 q70: 2.0457 q80: 2.0783 q90: 2.1193 q100: 2.2764\n", + "[one_class_1 CSI 0.5376] [one_class_1 best 0.5376] \n", + "[one_class_mean CSI 0.5376] [one_class_mean best 0.5376] \n", + "0.5376\t0.5376\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 4096\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 4096 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor4096.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 73, + "id": "49250ae3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0099\t0.0038\t0.0037\t0.0035\n", + "weight_shi:\t-0.0601\t0.0628\t0.0572\t0.0620\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5004063743809436\n", + "CNMC 2.1367 +- 0.2510 q0: 1.4456 q10: 1.8346 q20: 1.9233 q30: 1.9922 q40: 2.0485 q50: 2.1189 q60: 2.1860 q70: 2.2597 q80: 2.3425 q90: 2.4668 q100: 3.2275\n", + "one_class_1 2.1346 +- 0.3755 q0: 1.1971 q10: 1.6290 q20: 1.8287 q30: 1.9361 q40: 2.0319 q50: 2.1191 q60: 2.2264 q70: 2.3246 q80: 2.4509 q90: 2.6481 q100: 3.1412\n", + "[one_class_1 CSI 0.5004] [one_class_1 best 0.5004] \n", + "[one_class_mean CSI 0.5004] [one_class_mean best 0.5004] \n", + "0.5004\t0.5004\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 2048\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 2048 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor2048.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 74, + "id": "0bd84a7e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0242\t0.0050\t0.0046\t0.0044\n", + "weight_shi:\t-0.0828\t0.0645\t0.0669\t0.0596\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.521132733772876\n", + "CNMC 2.1385 +- 0.2030 q0: 1.5179 q10: 1.8773 q20: 1.9648 q30: 2.0325 q40: 2.0825 q50: 2.1336 q60: 2.1808 q70: 2.2389 q80: 2.3102 q90: 2.3997 q100: 2.7902\n", + "one_class_1 2.1145 +- 0.2767 q0: 1.3283 q10: 1.7725 q20: 1.8865 q30: 1.9760 q40: 2.0428 q50: 2.1166 q60: 2.1976 q70: 2.2709 q80: 2.3529 q90: 2.4697 q100: 2.8054\n", + "[one_class_1 CSI 0.5211] [one_class_1 best 0.5211] \n", + "[one_class_mean CSI 0.5211] [one_class_mean best 0.5211] \n", + "0.5211\t0.5211\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 1024\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 1024 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor1024.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 75, + "id": "7084a03f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0059\t0.0055\t0.0051\t0.0051\n", + "weight_shi:\t-0.0132\t0.0371\t0.0377\t0.0376\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5495593179999798\n", + "CNMC 2.0729 +- 0.0917 q0: 1.7983 q10: 1.9554 q20: 1.9947 q30: 2.0222 q40: 2.0461 q50: 2.0659 q60: 2.0930 q70: 2.1220 q80: 2.1540 q90: 2.1973 q100: 2.3686\n", + "one_class_1 2.0536 +- 0.1203 q0: 1.7288 q10: 1.9078 q20: 1.9506 q30: 1.9885 q40: 2.0186 q50: 2.0481 q60: 2.0860 q70: 2.1206 q80: 2.1548 q90: 2.2130 q100: 2.3754\n", + "[one_class_1 CSI 0.5496] [one_class_1 best 0.5496] \n", + "[one_class_mean CSI 0.5496] [one_class_mean best 0.5496] \n", + "0.5496\t0.5496\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 512\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 512 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor512.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 76, + "id": "7609406d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0033\t0.0016\t0.0015\t0.0015\n", + "weight_shi:\t-0.0626\t0.0548\t0.0482\t0.0476\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5079350611207324\n", + "CNMC 2.1463 +- 0.2056 q0: 1.5154 q10: 1.8902 q20: 1.9661 q30: 2.0278 q40: 2.0864 q50: 2.1497 q60: 2.1995 q70: 2.2608 q80: 2.3272 q90: 2.4144 q100: 2.8480\n", + "one_class_1 2.1363 +- 0.2821 q0: 1.3866 q10: 1.7738 q20: 1.8962 q30: 1.9787 q40: 2.0632 q50: 2.1372 q60: 2.2119 q70: 2.2966 q80: 2.3999 q90: 2.5028 q100: 2.7673\n", + "[one_class_1 CSI 0.5079] [one_class_1 best 0.5079] \n", + "[one_class_mean CSI 0.5079] [one_class_mean best 0.5079] \n", + "0.5079\t0.5079\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 256\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 256 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor256.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 77, + "id": "aad2a734", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0048\t0.0080\t0.0087\t0.0057\n", + "weight_shi:\t-0.0840\t0.0954\t0.0919\t0.0779\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5942041645145282\n", + "CNMC 2.0494 +- 0.1428 q0: 1.7036 q10: 1.8748 q20: 1.9344 q30: 1.9773 q40: 2.0081 q50: 2.0436 q60: 2.0718 q70: 2.1122 q80: 2.1561 q90: 2.2278 q100: 2.7563\n", + "one_class_1 2.0001 +- 0.1729 q0: 1.5526 q10: 1.7899 q20: 1.8590 q30: 1.9109 q40: 1.9589 q50: 1.9977 q60: 2.0327 q70: 2.0653 q80: 2.1332 q90: 2.2229 q100: 2.6275\n", + "[one_class_1 CSI 0.5942] [one_class_1 best 0.5942] \n", + "[one_class_mean CSI 0.5942] [one_class_mean best 0.5942] \n", + "0.5942\t0.5942\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 128\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 128 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor128.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 78, + "id": "eceb0082", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0055\t0.0037\t0.0058\t0.0037\n", + "weight_shi:\t-0.1448\t0.1735\t0.1588\t0.1423\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7104164767720962\n", + "CNMC 2.0883 +- 0.1055 q0: 1.7978 q10: 1.9497 q20: 1.9997 q30: 2.0352 q40: 2.0642 q50: 2.0925 q60: 2.1162 q70: 2.1440 q80: 2.1764 q90: 2.2198 q100: 2.4996\n", + "one_class_1 1.9981 +- 0.1273 q0: 1.6099 q10: 1.8403 q20: 1.8878 q30: 1.9263 q40: 1.9618 q50: 1.9937 q60: 2.0157 q70: 2.0606 q80: 2.1110 q90: 2.1738 q100: 2.4795\n", + "[one_class_1 CSI 0.7104] [one_class_1 best 0.7104] \n", + "[one_class_mean CSI 0.7104] [one_class_mean best 0.7104] \n", + "0.7104\t0.7104\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 64\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 64 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor64.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 79, + "id": "7c881700", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0028\t0.0085\t0.0044\t0.0097\n", + "weight_shi:\t-0.0235\t0.0638\t0.0549\t0.0541\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5937648750746918\n", + "CNMC 2.0002 +- 0.1702 q0: 1.6494 q10: 1.8157 q20: 1.8583 q30: 1.8929 q40: 1.9291 q50: 1.9695 q60: 2.0094 q70: 2.0576 q80: 2.1345 q90: 2.2419 q100: 2.8225\n", + "one_class_1 1.9446 +- 0.1597 q0: 1.5613 q10: 1.7697 q20: 1.8122 q30: 1.8531 q40: 1.8816 q50: 1.9188 q60: 1.9583 q70: 2.0091 q80: 2.0737 q90: 2.1568 q100: 2.5480\n", + "[one_class_1 CSI 0.5938] [one_class_1 best 0.5938] \n", + "[one_class_mean CSI 0.5938] [one_class_mean best 0.5938] \n", + "0.5938\t0.5938\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 32\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 32 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor32.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 80, + "id": "afaa2706", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0030\t0.0048\t0.0042\t0.0054\n", + "weight_shi:\t-0.0352\t0.0883\t0.0761\t0.0693\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5747906095868907\n", + "CNMC 2.0814 +- 0.1200 q0: 1.7198 q10: 1.9284 q20: 1.9790 q30: 2.0137 q40: 2.0462 q50: 2.0794 q60: 2.1084 q70: 2.1462 q80: 2.1929 q90: 2.2480 q100: 2.3614\n", + "one_class_1 2.0319 +- 0.1736 q0: 1.4826 q10: 1.8007 q20: 1.8786 q30: 1.9545 q40: 1.9968 q50: 2.0343 q60: 2.0956 q70: 2.1409 q80: 2.1917 q90: 2.2512 q100: 2.3875\n", + "[one_class_1 CSI 0.5748] [one_class_1 best 0.5748] \n", + "[one_class_mean CSI 0.5748] [one_class_mean best 0.5748] \n", + "0.5748\t0.5748\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 16\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 16 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor16.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 81, + "id": "374eec9c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0040\t0.0039\t0.0044\t0.0039\n", + "weight_shi:\t-0.0360\t0.1191\t0.0847\t0.0773\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.47723417292052783\n", + "CNMC 2.1169 +- 0.2902 q0: 1.3060 q10: 1.7390 q20: 1.8614 q30: 1.9501 q40: 2.0312 q50: 2.1085 q60: 2.1828 q70: 2.2782 q80: 2.3917 q90: 2.5189 q100: 3.0332\n", + "one_class_1 2.1411 +- 0.3676 q0: 1.2368 q10: 1.6509 q20: 1.8257 q30: 1.9349 q40: 2.0555 q50: 2.1498 q60: 2.2467 q70: 2.3477 q80: 2.4742 q90: 2.6155 q100: 3.2105\n", + "[one_class_1 CSI 0.4772] [one_class_1 best 0.4772] \n", + "[one_class_mean CSI 0.4772] [one_class_mean best 0.4772] \n", + "0.4772\t0.4772\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 8 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor8.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 82, + "id": "2b907319", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0120\t0.0137\t0.0113\t0.0151\n", + "weight_shi:\t-0.0230\t0.0744\t0.0702\t0.0797\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7440512360870578\n", + "CNMC 2.0897 +- 0.1280 q0: 1.6279 q10: 1.9253 q20: 1.9937 q30: 2.0344 q40: 2.0646 q50: 2.0929 q60: 2.1235 q70: 2.1514 q80: 2.1925 q90: 2.2452 q100: 2.5109\n", + "one_class_1 1.9564 +- 0.1648 q0: 1.2763 q10: 1.7402 q20: 1.8197 q30: 1.8821 q40: 1.9240 q50: 1.9604 q60: 2.0013 q70: 2.0434 q80: 2.0908 q90: 2.1604 q100: 2.4535\n", + "[one_class_1 CSI 0.7441] [one_class_1 best 0.7441] \n", + "[one_class_mean CSI 0.7441] [one_class_mean best 0.7441] \n", + "0.7441\t0.7441\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 5\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 5 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor5.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 83, + "id": "eadc9f63", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0044\t0.0050\t0.0036\t0.0045\n", + "weight_shi:\t-0.0174\t0.0520\t0.0435\t0.0457\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6856960015799229\n", + "CNMC 2.0337 +- 0.0637 q0: 1.7790 q10: 1.9526 q20: 1.9802 q30: 2.0010 q40: 2.0182 q50: 2.0348 q60: 2.0490 q70: 2.0674 q80: 2.0882 q90: 2.1155 q100: 2.2475\n", + "one_class_1 1.9842 +- 0.0803 q0: 1.6728 q10: 1.8819 q20: 1.9195 q30: 1.9483 q40: 1.9679 q50: 1.9875 q60: 2.0072 q70: 2.0236 q80: 2.0479 q90: 2.0840 q100: 2.2335\n", + "[one_class_1 CSI 0.6857] [one_class_1 best 0.6857] \n", + "[one_class_mean CSI 0.6857] [one_class_mean best 0.6857] \n", + "0.6857\t0.6857\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 4\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 4 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor4.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 84, + "id": "66a30bac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0096\t0.0088\t0.0090\t0.0096\n", + "weight_shi:\t-0.0320\t0.1007\t0.1076\t0.0998\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.49014067389785193\n", + "CNMC 2.0877 +- 0.2810 q0: 1.3173 q10: 1.7265 q20: 1.8460 q30: 1.9306 q40: 2.0048 q50: 2.0768 q60: 2.1398 q70: 2.2252 q80: 2.3252 q90: 2.4627 q100: 3.0835\n", + "one_class_1 2.0957 +- 0.3295 q0: 1.1248 q10: 1.6700 q20: 1.8185 q30: 1.9197 q40: 2.0144 q50: 2.0849 q60: 2.1813 q70: 2.2611 q80: 2.3718 q90: 2.5219 q100: 3.0920\n", + "[one_class_1 CSI 0.4901] [one_class_1 best 0.4901] \n", + "[one_class_mean CSI 0.4901] [one_class_mean best 0.4901] \n", + "0.4901\t0.4901\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 3 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor3.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "id": "e8fde266", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0051\t0.0046\t0.0048\t0.0045\n", + "weight_shi:\t-0.0137\t0.0407\t0.0450\t0.0411\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4467181154356435\n", + "CNMC 2.0176 +- 0.0689 q0: 1.7903 q10: 1.9308 q20: 1.9620 q30: 1.9833 q40: 1.9998 q50: 2.0144 q60: 2.0329 q70: 2.0534 q80: 2.0752 q90: 2.1054 q100: 2.2461\n", + "one_class_1 2.0300 +- 0.0917 q0: 1.7417 q10: 1.9114 q20: 1.9580 q30: 1.9866 q40: 2.0089 q50: 2.0337 q60: 2.0591 q70: 2.0798 q80: 2.1052 q90: 2.1409 q100: 2.2672\n", + "[one_class_1 CSI 0.4467] [one_class_1 best 0.4467] \n", + "[one_class_mean CSI 0.4467] [one_class_mean best 0.4467] \n", + "0.4467\t0.4467\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# sharp : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --sharpness_factor 2 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type sharp --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/sharp/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_sharp_resize_factor0.08_color_dist0.5_sharpness_factor2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "bac55a6b", + "metadata": {}, + "source": [ + "# Random Perspective" + ] + }, + { + "cell_type": "code", + "execution_count": 86, + "id": "acb8e0cf", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0046\t0.0037\t0.0045\t0.0046\n", + "weight_shi:\t0.1028\t-0.1896\t-0.2910\t-0.3483\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6587695844600411\n", + "CNMC 2.0386 +- 0.2243 q0: 1.1465 q10: 1.7463 q20: 1.8820 q30: 1.9641 q40: 2.0161 q50: 2.0690 q60: 2.1137 q70: 2.1678 q80: 2.2188 q90: 2.2888 q100: 2.5369\n", + "one_class_1 1.8805 +- 0.3066 q0: 0.7440 q10: 1.4384 q20: 1.6744 q30: 1.7753 q40: 1.8640 q50: 1.9389 q60: 1.9992 q70: 2.0643 q80: 2.1327 q90: 2.2159 q100: 2.4966\n", + "[one_class_1 CSI 0.6588] [one_class_1 best 0.6588] \n", + "[one_class_mean CSI 0.6588] [one_class_mean best 0.6588] \n", + "0.6588\t0.6588\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.95\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.95 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.95_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 87, + "id": "38406c45", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0036\t0.0043\t0.0044\t0.0045\n", + "weight_shi:\t0.0940\t-0.3354\t-0.3010\t-0.4613\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6866549691611218\n", + "CNMC 2.0701 +- 0.1458 q0: 1.3353 q10: 1.9029 q20: 1.9897 q30: 2.0319 q40: 2.0558 q50: 2.0795 q60: 2.1028 q70: 2.1319 q80: 2.1715 q90: 2.2439 q100: 2.4895\n", + "one_class_1 1.9579 +- 0.2070 q0: 0.9880 q10: 1.6843 q20: 1.8208 q30: 1.8972 q40: 1.9508 q50: 1.9923 q60: 2.0252 q70: 2.0622 q80: 2.0986 q90: 2.1886 q100: 2.4493\n", + "[one_class_1 CSI 0.6867] [one_class_1 best 0.6867] \n", + "[one_class_mean CSI 0.6867] [one_class_mean best 0.6867] \n", + "0.6867\t0.6867\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.9\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.9 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.9_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 88, + "id": "79e43776", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0058\t0.0098\t0.0051\t0.0075\n", + "weight_shi:\t0.7573\t0.7158\t-0.4403\t3.1769\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6343618023273478\n", + "CNMC 2.2964 +- 0.4564 q0: 0.6867 q10: 1.7046 q20: 1.9085 q30: 2.0619 q40: 2.1922 q50: 2.3256 q60: 2.4368 q70: 2.5468 q80: 2.6813 q90: 2.8610 q100: 3.7183\n", + "one_class_1 1.9670 +- 0.7117 q0: -1.6022 q10: 1.0639 q20: 1.4591 q30: 1.7160 q40: 1.8807 q50: 2.0547 q60: 2.2023 q70: 2.3776 q80: 2.5617 q90: 2.7902 q100: 3.2754\n", + "[one_class_1 CSI 0.6344] [one_class_1 best 0.6344] \n", + "[one_class_mean CSI 0.6344] [one_class_mean best 0.6344] \n", + "0.6344\t0.6344\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.85\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.85 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.85_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 89, + "id": "b5045a90", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0044\t0.0059\t0.0035\t0.0046\n", + "weight_shi:\t0.1149\t-0.5921\t-0.2913\t-0.4212\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6980790265244736\n", + "CNMC 2.0586 +- 0.0926 q0: 1.6907 q10: 1.9416 q20: 1.9891 q30: 2.0146 q40: 2.0367 q50: 2.0567 q60: 2.0821 q70: 2.1041 q80: 2.1311 q90: 2.1727 q100: 2.4236\n", + "one_class_1 1.9890 +- 0.1129 q0: 1.6747 q10: 1.8586 q20: 1.8947 q30: 1.9326 q40: 1.9579 q50: 1.9848 q60: 2.0103 q70: 2.0342 q80: 2.0767 q90: 2.1413 q100: 2.4328\n", + "[one_class_1 CSI 0.6981] [one_class_1 best 0.6981] \n", + "[one_class_mean CSI 0.6981] [one_class_mean best 0.6981] \n", + "0.6981\t0.6981\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.8\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.8 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.8_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 90, + "id": "5d4659ac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0020\t0.0024\t0.0019\t0.0028\n", + "weight_shi:\t0.0839\t-0.1992\t-0.1714\t-0.2720\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7161709152411915\n", + "CNMC 2.0766 +- 0.1562 q0: 1.4380 q10: 1.8760 q20: 1.9670 q30: 2.0275 q40: 2.0724 q50: 2.1054 q60: 2.1342 q70: 2.1645 q80: 2.2012 q90: 2.2452 q100: 2.4108\n", + "one_class_1 1.9367 +- 0.2053 q0: 1.1357 q10: 1.6437 q20: 1.7718 q30: 1.8741 q40: 1.9299 q50: 1.9756 q60: 2.0202 q70: 2.0576 q80: 2.1027 q90: 2.1710 q100: 2.3247\n", + "[one_class_1 CSI 0.7162] [one_class_1 best 0.7162] \n", + "[one_class_mean CSI 0.7162] [one_class_mean best 0.7162] \n", + "0.7162\t0.7162\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.75\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.75 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.75_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 91, + "id": "43c01d76", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0049\t0.0060\t0.0045\t0.0072\n", + "weight_shi:\t-1.4937\t0.4193\t-0.5923\t0.9519\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.272612645459241\n", + "CNMC 1.3428 +- 1.3765 q0: -2.6822 q10: -0.1317 q20: 0.2430 q30: 0.5766 q40: 0.8777 q50: 1.1651 q60: 1.4791 q70: 1.7858 q80: 2.3163 q90: 3.1566 q100: 7.5281\n", + "one_class_1 2.6219 +- 1.7311 q0: -1.5026 q10: 0.6189 q20: 1.1608 q30: 1.6420 q40: 2.0065 q50: 2.4546 q60: 2.8590 q70: 3.3026 q80: 3.9411 q90: 5.0125 q100: 8.8420\n", + "[one_class_1 CSI 0.2726] [one_class_1 best 0.2726] \n", + "[one_class_mean CSI 0.2726] [one_class_mean best 0.2726] \n", + "0.2726\t0.2726\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.6\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.6 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.6_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 92, + "id": "b3c2bb68", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0040\t0.0080\t0.0069\t0.0094\n", + "weight_shi:\t0.2243\t2.4831\t-1.1810\t8.3228\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.28934109115952156\n", + "CNMC 1.5171 +- 0.6417 q0: -1.0892 q10: 0.6963 q20: 1.0163 q30: 1.2147 q40: 1.3800 q50: 1.5239 q60: 1.6610 q70: 1.8232 q80: 2.0413 q90: 2.3147 q100: 3.5528\n", + "one_class_1 2.0698 +- 0.8043 q0: -0.3686 q10: 0.9775 q20: 1.4094 q30: 1.6626 q40: 1.9406 q50: 2.1430 q60: 2.3291 q70: 2.4725 q80: 2.7776 q90: 3.0563 q100: 4.2509\n", + "[one_class_1 CSI 0.2893] [one_class_1 best 0.2893] \n", + "[one_class_mean CSI 0.2893] [one_class_mean best 0.2893] \n", + "0.2893\t0.2893\n" + ] + } + ], + "source": [ + "###### EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# randper_dist: 0.3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --distortion_scale 0.3 --color_distort 0.5 --resize_factor 0.08 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type randpers --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/randpers/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_randpers_resize_factor0.08_color_dist0.5_distortion_scale0.3_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "5cfed222", + "metadata": {}, + "source": [ + "# Color Distortion = 0.8" + ] + }, + { + "cell_type": "markdown", + "id": "009f41d0", + "metadata": {}, + "source": [ + "## Examine crop" + ] + }, + { + "cell_type": "code", + "execution_count": 196, + "id": "0c216c1d", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0109\t0.0127\t0.0131\t0.0112\n", + "weight_shi:\t-0.3601\t0.8696\t0.8266\t1.2633\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3773116499053059\n", + "CNMC 1.9315 +- 0.0895 q0: 1.6857 q10: 1.8069 q20: 1.8435 q30: 1.8794 q40: 1.9068 q50: 1.9359 q60: 1.9582 q70: 1.9849 q80: 2.0138 q90: 2.0465 q100: 2.2963\n", + "one_class_1 1.9880 +- 0.1336 q0: 1.7198 q10: 1.8075 q20: 1.8598 q30: 1.9006 q40: 1.9419 q50: 1.9827 q60: 2.0260 q70: 2.0729 q80: 2.1099 q90: 2.1645 q100: 2.4634\n", + "[one_class_1 CSI 0.3773] [one_class_1 best 0.3773] \n", + "[one_class_mean CSI 0.3773] [one_class_mean best 0.3773] \n", + "0.3773\t0.3773\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.5\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.5 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.5_color_dist0.8_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 197, + "id": "6320eef5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0076\t0.0076\t0.0074\t0.0074\n", + "weight_shi:\t0.9058\t0.5362\t0.6368\t-14.1887\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5917127477491163\n", + "CNMC 2.8214 +- 1.0315 q0: -1.2486 q10: 1.3987 q20: 1.9532 q30: 2.3459 q40: 2.7058 q50: 2.9513 q60: 3.1875 q70: 3.4447 q80: 3.7111 q90: 4.0288 q100: 5.9040\n", + "one_class_1 2.3812 +- 1.3314 q0: -2.1268 q10: 0.5397 q20: 1.2181 q30: 1.7456 q40: 2.2246 q50: 2.5793 q60: 2.9176 q70: 3.1949 q80: 3.5267 q90: 3.9124 q100: 4.9106\n", + "[one_class_1 CSI 0.5917] [one_class_1 best 0.5917] \n", + "[one_class_mean CSI 0.5917] [one_class_mean best 0.5917] \n", + "0.5917\t0.5917\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.3\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.3 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.3_color_dist0.8_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 198, + "id": "451c90e5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0110\t0.0071\t0.0102\t0.0101\n", + "weight_shi:\t-0.2335\t0.3455\t0.5920\t0.5756\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4148462107171432\n", + "CNMC 1.8587 +- 0.2019 q0: 1.3001 q10: 1.5984 q20: 1.6837 q30: 1.7477 q40: 1.8032 q50: 1.8568 q60: 1.9066 q70: 1.9618 q80: 2.0281 q90: 2.1033 q100: 2.4803\n", + "one_class_1 1.9549 +- 0.3011 q0: 1.3374 q10: 1.5829 q20: 1.6833 q30: 1.7648 q40: 1.8483 q50: 1.9432 q60: 2.0105 q70: 2.0860 q80: 2.2017 q90: 2.3676 q100: 3.0008\n", + "[one_class_1 CSI 0.4148] [one_class_1 best 0.4148] \n", + "[one_class_mean CSI 0.4148] [one_class_mean best 0.4148] \n", + "0.4148\t0.4148\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.02\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.02 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.02_color_dist0.8_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 199, + "id": "54fef60e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0055\t0.0047\t0.0063\t0.0070\n", + "weight_shi:\t-1.5156\t2.2142\t13.3925\t216.9532\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3877887663435927\n", + "CNMC -9.2248 +- 14.9978 q0: -31.8010 q10: -22.8496 q20: -20.5631 q30: -18.8369 q40: -16.1600 q50: -13.7478 q60: -10.1906 q70: -5.6572 q80: 0.0581 q90: 9.1230 q100: 77.4578\n", + "one_class_1 0.7817 +- 24.0001 q0: -33.6751 q10: -22.8505 q20: -20.2689 q30: -16.3248 q40: -11.6706 q50: -5.3667 q60: 0.3825 q70: 6.7728 q80: 17.5805 q90: 39.7293 q100: 83.7649\n", + "[one_class_1 CSI 0.3878] [one_class_1 best 0.3878] \n", + "[one_class_mean CSI 0.3878] [one_class_mean best 0.3878] \n", + "0.3878\t0.3878\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.008\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.008 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.008_color_dist0.8_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "2dccb685", + "metadata": {}, + "source": [ + "## Examine blur_sigma" + ] + }, + { + "cell_type": "code", + "execution_count": 200, + "id": "0c13892c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0059\t0.0053\t0.0052\t0.0054\n", + "weight_shi:\t-0.0908\t0.2339\t0.2553\t0.2459\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.46215401209248624\n", + "CNMC 1.9646 +- 0.0814 q0: 1.7239 q10: 1.8537 q20: 1.8937 q30: 1.9247 q40: 1.9510 q50: 1.9695 q60: 1.9918 q70: 2.0122 q80: 2.0334 q90: 2.0642 q100: 2.1895\n", + "one_class_1 1.9790 +- 0.1048 q0: 1.6906 q10: 1.8438 q20: 1.8841 q30: 1.9178 q40: 1.9505 q50: 1.9783 q60: 2.0103 q70: 2.0393 q80: 2.0700 q90: 2.1155 q100: 2.2617\n", + "[one_class_1 CSI 0.4622] [one_class_1 best 0.4622] \n", + "[one_class_mean CSI 0.4622] [one_class_mean best 0.4622] \n", + "0.4622\t0.4622\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma40.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 201, + "id": "7b24db11", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0172\t0.0135\t0.0226\t0.0192\n", + "weight_shi:\t-0.0741\t0.1495\t0.1978\t0.1718\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4793875773503884\n", + "CNMC 1.9531 +- 0.1138 q0: 1.6474 q10: 1.8092 q20: 1.8542 q30: 1.8878 q40: 1.9235 q50: 1.9532 q60: 1.9802 q70: 2.0102 q80: 2.0447 q90: 2.0976 q100: 2.3859\n", + "one_class_1 1.9692 +- 0.1523 q0: 1.6030 q10: 1.7796 q20: 1.8318 q30: 1.8785 q40: 1.9130 q50: 1.9548 q60: 1.9983 q70: 2.0455 q80: 2.0996 q90: 2.1848 q100: 2.3561\n", + "[one_class_1 CSI 0.4794] [one_class_1 best 0.4794] \n", + "[one_class_mean CSI 0.4794] [one_class_mean best 0.4794] \n", + "0.4794\t0.4794\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 20\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 20 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma20.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 202, + "id": "352c0a41", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0025\t0.0040\t0.0022\t0.0025\n", + "weight_shi:\t1.2410\t0.6755\t-1.1582\t-3.5877\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6594658645520007\n", + "CNMC 2.1628 +- 0.3004 q0: 0.7838 q10: 1.7714 q20: 1.9641 q30: 2.0468 q40: 2.1213 q50: 2.1830 q60: 2.2509 q70: 2.3213 q80: 2.4001 q90: 2.5014 q100: 3.3713\n", + "one_class_1 1.9514 +- 0.4284 q0: -0.1104 q10: 1.4546 q20: 1.6765 q30: 1.7941 q40: 1.8892 q50: 2.0071 q60: 2.0917 q70: 2.1785 q80: 2.2711 q90: 2.4356 q100: 2.9090\n", + "[one_class_1 CSI 0.6595] [one_class_1 best 0.6595] \n", + "[one_class_mean CSI 0.6595] [one_class_mean best 0.6595] \n", + "0.6595\t0.6595\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 6\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 6 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma6.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 203, + "id": "d22c485a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0050\t0.0078\t0.0052\t0.0062\n", + "weight_shi:\t0.4106\t0.4163\t-2.7425\t-3.5688\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6272255643666637\n", + "CNMC 2.1200 +- 0.6206 q0: -0.0284 q10: 1.2949 q20: 1.5957 q30: 1.8155 q40: 2.0312 q50: 2.1755 q60: 2.3270 q70: 2.4960 q80: 2.6611 q90: 2.8534 q100: 4.1276\n", + "one_class_1 1.8269 +- 0.6667 q0: -0.9521 q10: 0.9740 q20: 1.3397 q30: 1.5917 q40: 1.7532 q50: 1.8870 q60: 2.0460 q70: 2.2002 q80: 2.3477 q90: 2.5711 q100: 3.5803\n", + "[one_class_1 CSI 0.6272] [one_class_1 best 0.6272] \n", + "[one_class_mean CSI 0.6272] [one_class_mean best 0.6272] \n", + "0.6272\t0.6272\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 4\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 4 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma4.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 204, + "id": "00a8d2ac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0047\t0.0072\t0.0056\t0.0047\n", + "weight_shi:\t0.3757\t1.6655\t5.1831\t-1.0361\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.49718323053707253\n", + "CNMC 1.9914 +- 0.1852 q0: 1.3248 q10: 1.7612 q20: 1.8463 q30: 1.9021 q40: 1.9490 q50: 1.9936 q60: 2.0285 q70: 2.0851 q80: 2.1355 q90: 2.2233 q100: 2.6945\n", + "one_class_1 1.9965 +- 0.2132 q0: 1.1855 q10: 1.7487 q20: 1.8267 q30: 1.8843 q40: 1.9394 q50: 1.9881 q60: 2.0429 q70: 2.1000 q80: 2.1632 q90: 2.2481 q100: 2.8524\n", + "[one_class_1 CSI 0.4972] [one_class_1 best 0.4972] \n", + "[one_class_mean CSI 0.4972] [one_class_mean best 0.4972] \n", + "0.4972\t0.4972\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 3\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma3.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 205, + "id": "cdab5a91", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0019\t0.0026\t0.0019\t0.0022\n", + "weight_shi:\t0.2520\t-1.0379\t-0.8245\t-0.8299\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7393317230273752\n", + "CNMC 2.1093 +- 0.1696 q0: 1.2392 q10: 1.9016 q20: 2.0068 q30: 2.0585 q40: 2.1066 q50: 2.1333 q60: 2.1667 q70: 2.1958 q80: 2.2352 q90: 2.2885 q100: 2.5315\n", + "one_class_1 1.9282 +- 0.2660 q0: 0.4865 q10: 1.6153 q20: 1.7843 q30: 1.8731 q40: 1.9295 q50: 1.9714 q60: 2.0108 q70: 2.0668 q80: 2.1224 q90: 2.2106 q100: 2.5011\n", + "[one_class_1 CSI 0.7393] [one_class_1 best 0.7393] \n", + "[one_class_mean CSI 0.7393] [one_class_mean best 0.7393] \n", + "0.7393\t0.7393\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma2.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 206, + "id": "76bdab2e", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0044\t0.0059\t0.0046\t0.0046\n", + "weight_shi:\t0.2676\t-0.5492\t-0.7697\t-0.6319\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6709070124267007\n", + "CNMC 2.0490 +- 0.0659 q0: 1.6783 q10: 1.9728 q20: 1.9995 q30: 2.0151 q40: 2.0347 q50: 2.0501 q60: 2.0654 q70: 2.0818 q80: 2.1012 q90: 2.1296 q100: 2.2543\n", + "one_class_1 1.9948 +- 0.1054 q0: 1.5066 q10: 1.8893 q20: 1.9323 q30: 1.9563 q40: 1.9732 q50: 1.9993 q60: 2.0219 q70: 2.0484 q80: 2.0819 q90: 2.1226 q100: 2.2211\n", + "[one_class_1 CSI 0.6709] [one_class_1 best 0.6709] \n", + "[one_class_mean CSI 0.6709] [one_class_mean best 0.6709] \n", + "0.6709\t0.6709\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.5\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 1.5 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma1.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 207, + "id": "0c1efb9f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0058\t0.0159\t0.0080\t0.0086\n", + "weight_shi:\t0.5438\t-2.8363\t-21.1928\t-1.9421\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6621902186572681\n", + "CNMC 2.2408 +- 0.8449 q0: -0.8843 q10: 1.1519 q20: 1.6038 q30: 1.9076 q40: 2.1231 q50: 2.3416 q60: 2.5656 q70: 2.7323 q80: 2.9607 q90: 3.2092 q100: 3.9264\n", + "one_class_1 1.6402 +- 1.1251 q0: -2.9414 q10: 0.0207 q20: 0.8939 q30: 1.3058 q40: 1.6627 q50: 1.9102 q60: 2.1044 q70: 2.2975 q80: 2.5539 q90: 2.8038 q100: 3.7386\n", + "[one_class_1 CSI 0.6622] [one_class_1 best 0.6622] \n", + "[one_class_mean CSI 0.6622] [one_class_mean best 0.6622] \n", + "0.6622\t0.6622\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1\n", + "# color_dist : 0.8 \n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.8 --resize_factor 0.08 --blur_sigma 1 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.8/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.8_blur_sigma1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "f676267b", + "metadata": {}, + "source": [ + "# Color Distortion = 1" + ] + }, + { + "cell_type": "markdown", + "id": "744297b9", + "metadata": {}, + "source": [ + "## Examine crop" + ] + }, + { + "cell_type": "code", + "execution_count": 208, + "id": "21a87be2", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0061\t0.0065\t0.0065\t0.0056\n", + "weight_shi:\t1.6932\t-31.1268\t15.0080\t-10.2414\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7102132895816242\n", + "CNMC 2.2522 +- 0.5226 q0: -0.3755 q10: 1.6169 q20: 1.9232 q30: 2.1160 q40: 2.2220 q50: 2.3245 q60: 2.4225 q70: 2.5144 q80: 2.6312 q90: 2.8060 q100: 3.9139\n", + "one_class_1 1.8127 +- 0.7110 q0: -1.7832 q10: 0.9329 q20: 1.3309 q30: 1.6150 q40: 1.7793 q50: 1.9225 q60: 2.0429 q70: 2.1887 q80: 2.3378 q90: 2.5668 q100: 3.4155\n", + "[one_class_1 CSI 0.7102] [one_class_1 best 0.7102] \n", + "[one_class_mean CSI 0.7102] [one_class_mean best 0.7102] \n", + "0.7102\t0.7102\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.5\n", + "# blur_sigma : 2\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.5 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.5_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 209, + "id": "8dd1d6d5", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0092\t0.0099\t0.0099\t0.0096\n", + "weight_shi:\t0.5734\t-1.4904\t-1.4266\t-2.6760\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5938636202513699\n", + "CNMC 2.0102 +- 0.1072 q0: 1.4470 q10: 1.8844 q20: 1.9436 q30: 1.9761 q40: 2.0084 q50: 2.0343 q60: 2.0578 q70: 2.0757 q80: 2.0944 q90: 2.1139 q100: 2.2014\n", + "one_class_1 1.9687 +- 0.1365 q0: 1.2909 q10: 1.8035 q20: 1.8848 q30: 1.9370 q40: 1.9730 q50: 2.0035 q60: 2.0287 q70: 2.0532 q80: 2.0725 q90: 2.0980 q100: 2.1942\n", + "[one_class_1 CSI 0.5939] [one_class_1 best 0.5939] \n", + "[one_class_mean CSI 0.5939] [one_class_mean best 0.5939] \n", + "0.5939\t0.5939\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.3\n", + "# blur_sigma : 2\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.3 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.3_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 210, + "id": "80437a6c", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0086\t0.0090\t0.0096\t0.0084\n", + "weight_shi:\t-0.6178\t0.6564\t1.4537\t1.9758\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.40624398667193307\n", + "CNMC 1.9234 +- 0.1679 q0: 1.2990 q10: 1.6940 q20: 1.7902 q30: 1.8538 q40: 1.9027 q50: 1.9438 q60: 1.9742 q70: 2.0088 q80: 2.0543 q90: 2.1136 q100: 2.6046\n", + "one_class_1 1.9913 +- 0.2119 q0: 1.3411 q10: 1.7247 q20: 1.8161 q30: 1.8739 q40: 1.9369 q50: 1.9987 q60: 2.0415 q70: 2.0979 q80: 2.1553 q90: 2.2420 q100: 2.6629\n", + "[one_class_1 CSI 0.4062] [one_class_1 best 0.4062] \n", + "[one_class_mean CSI 0.4062] [one_class_mean best 0.4062] \n", + "0.4062\t0.4062\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.02\n", + "# blur_sigma : 2\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.02 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.02_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 211, + "id": "5ee4b03d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0077\t0.0063\t0.0079\t0.0085\n", + "weight_shi:\t-0.5622\t1.4395\t2.1736\t5.1802\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.40242330791278014\n", + "CNMC 1.7715 +- 0.3123 q0: 1.0132 q10: 1.4352 q20: 1.5132 q30: 1.5847 q40: 1.6416 q50: 1.7219 q60: 1.7995 q70: 1.8923 q80: 2.0104 q90: 2.1944 q100: 3.1272\n", + "one_class_1 1.9377 +- 0.4535 q0: 1.0669 q10: 1.4215 q20: 1.5153 q30: 1.6260 q40: 1.7391 q50: 1.8745 q60: 1.9976 q70: 2.1337 q80: 2.2999 q90: 2.5968 q100: 3.2364\n", + "[one_class_1 CSI 0.4024] [one_class_1 best 0.4024] \n", + "[one_class_mean CSI 0.4024] [one_class_mean best 0.4024] \n", + "0.4024\t0.4024\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.008\n", + "# blur_sigma : 2\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.008 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.008_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "3993fc92", + "metadata": {}, + "source": [ + "## Examine blur_sigma" + ] + }, + { + "cell_type": "code", + "execution_count": 212, + "id": "d11c9dcd", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0118\t0.0082\t0.0118\t0.0109\n", + "weight_shi:\t-0.5332\t0.3382\t1.2635\t1.1178\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.513900282563121\n", + "CNMC 1.8224 +- 0.5573 q0: 0.4695 q10: 1.0940 q20: 1.3195 q30: 1.5008 q40: 1.6572 q50: 1.8032 q60: 1.9693 q70: 2.1209 q80: 2.3099 q90: 2.5687 q100: 3.4860\n", + "one_class_1 1.8135 +- 0.7140 q0: 0.2666 q10: 0.8849 q20: 1.1651 q30: 1.3949 q40: 1.5485 q50: 1.7703 q60: 1.9382 q70: 2.1803 q80: 2.4559 q90: 2.7728 q100: 4.0059\n", + "[one_class_1 CSI 0.5139] [one_class_1 best 0.5139] \n", + "[one_class_mean CSI 0.5139] [one_class_mean best 0.5139] \n", + "0.5139\t0.5139\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_40.0_resize_factor_0.08_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 213, + "id": "b5ffde5e", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0083\t0.0108\t0.0081\t0.0091\n", + "weight_shi:\t-0.0827\t0.1462\t0.2242\t0.2133\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5305926482950001\n", + "CNMC 1.9788 +- 0.0509 q0: 1.8514 q10: 1.9121 q20: 1.9387 q30: 1.9537 q40: 1.9657 q50: 1.9801 q60: 1.9904 q70: 2.0027 q80: 2.0193 q90: 2.0412 q100: 2.1780\n", + "one_class_1 1.9783 +- 0.0729 q0: 1.8180 q10: 1.8943 q20: 1.9205 q30: 1.9377 q40: 1.9541 q50: 1.9673 q60: 1.9856 q70: 2.0070 q80: 2.0322 q90: 2.0712 q100: 2.2575\n", + "[one_class_1 CSI 0.5306] [one_class_1 best 0.5306] \n", + "[one_class_mean CSI 0.5306] [one_class_mean best 0.5306] \n", + "0.5306\t0.5306\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 20\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 20 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_20.0_resize_factor_0.08_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 214, + "id": "46c0a5be", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0121\t0.0090\t0.0108\t0.0115\n", + "weight_shi:\t-0.1191\t0.1866\t0.3505\t0.3025\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5126887552031113\n", + "CNMC 1.9350 +- 0.1456 q0: 1.5801 q10: 1.7447 q20: 1.8072 q30: 1.8557 q40: 1.8955 q50: 1.9330 q60: 1.9678 q70: 2.0099 q80: 2.0570 q90: 2.1285 q100: 2.4351\n", + "one_class_1 1.9410 +- 0.1925 q0: 1.5534 q10: 1.7034 q20: 1.7718 q30: 1.8241 q40: 1.8672 q50: 1.9158 q60: 1.9602 q70: 2.0200 q80: 2.1069 q90: 2.2114 q100: 2.5473\n", + "[one_class_1 CSI 0.5127] [one_class_1 best 0.5127] \n", + "[one_class_mean CSI 0.5127] [one_class_mean best 0.5127] \n", + "0.5127\t0.5127\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 6\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 6 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_6.0_resize_factor_0.08_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 215, + "id": "9c074889", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0091\t0.0056\t0.0092\t0.0077\n", + "weight_shi:\t1.7473\t1.4099\t-3.2623\t7.2654\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3547040683012791\n", + "CNMC 1.6471 +- 0.5245 q0: 0.2048 q10: 0.9772 q20: 1.1761 q30: 1.3840 q40: 1.5410 q50: 1.6358 q60: 1.7534 q70: 1.8764 q80: 2.0524 q90: 2.3159 q100: 3.4153\n", + "one_class_1 2.0065 +- 0.7329 q0: 0.2362 q10: 1.0671 q20: 1.3288 q30: 1.5682 q40: 1.7685 q50: 1.9697 q60: 2.1593 q70: 2.3883 q80: 2.6860 q90: 3.0258 q100: 4.4012\n", + "[one_class_1 CSI 0.3547] [one_class_1 best 0.3547] \n", + "[one_class_mean CSI 0.3547] [one_class_mean best 0.3547] \n", + "0.3547\t0.3547\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 4\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 4 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist1.0_blur_sigma4.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 216, + "id": "99c14a28", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0033\t0.0033\t0.0025\t0.0033\n", + "weight_shi:\t0.2828\t-1.2986\t-0.7648\t-1.3398\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5759223812272759\n", + "CNMC 1.9848 +- 0.2270 q0: 1.1862 q10: 1.7005 q20: 1.7975 q30: 1.8756 q40: 1.9360 q50: 1.9962 q60: 2.0534 q70: 2.1141 q80: 2.1839 q90: 2.2699 q100: 2.5657\n", + "one_class_1 1.9048 +- 0.2961 q0: 0.9850 q10: 1.4973 q20: 1.6832 q30: 1.7788 q40: 1.8554 q50: 1.9257 q60: 1.9946 q70: 2.0781 q80: 2.1586 q90: 2.2805 q100: 2.6712\n", + "[one_class_1 CSI 0.5759] [one_class_1 best 0.5759] \n", + "[one_class_mean CSI 0.5759] [one_class_mean best 0.5759] \n", + "0.5759\t0.5759\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 3\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist1.0_blur_sigma3.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 221, + "id": "bd3e218a", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0050\t0.0049\t0.0049\t0.0050\n", + "weight_shi:\t0.3094\t-1.0241\t-0.9471\t-0.9535\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5930793556750625\n", + "CNMC 2.0015 +- 0.1074 q0: 1.5164 q10: 1.8705 q20: 1.9260 q30: 1.9607 q40: 1.9870 q50: 2.0109 q60: 2.0342 q70: 2.0588 q80: 2.0868 q90: 2.1264 q100: 2.2720\n", + "one_class_1 1.9465 +- 0.1678 q0: 1.2629 q10: 1.7271 q20: 1.8484 q30: 1.9020 q40: 1.9374 q50: 1.9751 q60: 2.0018 q70: 2.0397 q80: 2.0761 q90: 2.1296 q100: 2.2873\n", + "[one_class_1 CSI 0.5931] [one_class_1 best 0.5931] \n", + "[one_class_mean CSI 0.5931] [one_class_mean best 0.5931] \n", + "0.5931\t0.5931\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.08_color_dist1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 218, + "id": "c2f0113b", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0151\t0.0114\t0.0140\t0.0143\n", + "weight_shi:\t0.3904\t-1.7955\t-0.8990\t-1.3060\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.6319476093539534\n", + "CNMC 2.0984 +- 0.1571 q0: 1.7583 q10: 1.9064 q20: 1.9536 q30: 2.0042 q40: 2.0398 q50: 2.0803 q60: 2.1205 q70: 2.1755 q80: 2.2444 q90: 2.3124 q100: 2.6504\n", + "one_class_1 2.0194 +- 0.1919 q0: 1.5830 q10: 1.7904 q20: 1.8384 q30: 1.8841 q40: 1.9493 q50: 1.9958 q60: 2.0566 q70: 2.1204 q80: 2.1982 q90: 2.2910 q100: 2.6254\n", + "[one_class_1 CSI 0.6319] [one_class_1 best 0.6319] \n", + "[one_class_mean CSI 0.6319] [one_class_mean best 0.6319] \n", + "0.6319\t0.6319\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.5\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 1.5 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist1.0_blur_sigma1.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 219, + "id": "1a64397f", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0835\t0.0834\t0.0843\t0.0839\n", + "weight_shi:\t0.6194\t-4.8322\t-1.4623\t-2.0319\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.5156447806844306\n", + "CNMC 2.0671 +- 0.1604 q0: 1.6242 q10: 1.8621 q20: 1.9293 q30: 1.9753 q40: 2.0244 q50: 2.0641 q60: 2.0983 q70: 2.1559 q80: 2.2074 q90: 2.2798 q100: 2.5007\n", + "one_class_1 2.0520 +- 0.2129 q0: 1.5274 q10: 1.7652 q20: 1.8656 q30: 1.9325 q40: 1.9996 q50: 2.0618 q60: 2.1083 q70: 2.1784 q80: 2.2379 q90: 2.3346 q100: 2.6253\n", + "[one_class_1 CSI 0.5156] [one_class_1 best 0.5156] \n", + "[one_class_mean CSI 0.5156] [one_class_mean best 0.5156] \n", + "0.5156\t0.5156\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1\n", + "# color_dist : 1\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 1 --resize_factor 0.08 --blur_sigma 1 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist1.0/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist1.0_blur_sigma1.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "c1bce058", + "metadata": {}, + "source": [ + "# Color Distortion = 0.5" + ] + }, + { + "cell_type": "markdown", + "id": "65e662af", + "metadata": {}, + "source": [ + "## Examine crop" + ] + }, + { + "cell_type": "code", + "execution_count": 184, + "id": "fdaec3de", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0077\t0.0072\t0.0077\t0.0083\n", + "weight_shi:\t-0.2495\t0.5029\t0.4407\t0.6284\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4545575962892069\n", + "CNMC 1.9531 +- 0.0857 q0: 1.5656 q10: 1.8568 q20: 1.8889 q30: 1.9090 q40: 1.9290 q50: 1.9470 q60: 1.9664 q70: 1.9868 q80: 2.0131 q90: 2.0534 q100: 2.4858\n", + "one_class_1 1.9770 +- 0.1276 q0: 1.5910 q10: 1.8422 q20: 1.8816 q30: 1.9115 q40: 1.9369 q50: 1.9621 q60: 1.9818 q70: 2.0174 q80: 2.0584 q90: 2.1323 q100: 2.7000\n", + "[one_class_1 CSI 0.4546] [one_class_1 best 0.4546] \n", + "[one_class_mean CSI 0.4546] [one_class_mean best 0.4546] \n", + "0.4546\t0.4546\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.5\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.5 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.5_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 185, + "id": "eaa5ec79", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0069\t0.0088\t0.0090\t0.0079\n", + "weight_shi:\t2.7516\t0.9415\t1.1553\t-18.6953\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.580519095798013\n", + "CNMC 2.6811 +- 1.0535 q0: -1.5616 q10: 1.2724 q20: 1.7695 q30: 2.1545 q40: 2.4883 q50: 2.7551 q60: 3.0169 q70: 3.2695 q80: 3.5643 q90: 3.8850 q100: 6.2124\n", + "one_class_1 2.2993 +- 1.4215 q0: -2.7435 q10: 0.4967 q20: 1.2345 q30: 1.7164 q40: 2.0762 q50: 2.3752 q60: 2.6957 q70: 3.0288 q80: 3.4597 q90: 3.9539 q100: 6.3139\n", + "[one_class_1 CSI 0.5805] [one_class_1 best 0.5805] \n", + "[one_class_mean CSI 0.5805] [one_class_mean best 0.5805] \n", + "0.5805\t0.5805\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.3\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.3 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.3_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 220, + "id": "4a75f4d4", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0074\t0.0080\t0.0073\t0.0077\n", + "weight_shi:\t-0.8732\t0.8498\t2.4905\t1.5653\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.2671803947781525\n", + "CNMC 1.8416 +- 0.1772 q0: 1.2265 q10: 1.6388 q20: 1.7083 q30: 1.7532 q40: 1.7915 q50: 1.8242 q60: 1.8580 q70: 1.8990 q80: 1.9656 q90: 2.0775 q100: 2.5900\n", + "one_class_1 2.0431 +- 0.2719 q0: 1.2846 q10: 1.7272 q20: 1.8128 q30: 1.8857 q40: 1.9439 q50: 2.0013 q60: 2.0829 q70: 2.1644 q80: 2.2684 q90: 2.4103 q100: 2.9156\n", + "[one_class_1 CSI 0.2672] [one_class_1 best 0.2672] \n", + "[one_class_mean CSI 0.2672] [one_class_mean best 0.2672] \n", + "0.2672\t0.2672\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.02\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.02 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.02_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 187, + "id": "9d31d62a", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0046\t0.0037\t0.0035\t0.0047\n", + "weight_shi:\t0.4014\t-0.7791\t-0.6536\t-1.3711\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.611233276618155\n", + "CNMC 1.9991 +- 0.2593 q0: 0.9291 q10: 1.6096 q20: 1.7794 q30: 1.9130 q40: 1.9994 q50: 2.0695 q60: 2.1203 q70: 2.1657 q80: 2.2124 q90: 2.2692 q100: 2.5149\n", + "one_class_1 1.8852 +- 0.3136 q0: 0.6811 q10: 1.4317 q20: 1.6563 q30: 1.7768 q40: 1.8804 q50: 1.9566 q60: 2.0179 q70: 2.0924 q80: 2.1459 q90: 2.2152 q100: 2.4864\n", + "[one_class_1 CSI 0.6112] [one_class_1 best 0.6112] \n", + "[one_class_mean CSI 0.6112] [one_class_mean best 0.6112] \n", + "0.6112\t0.6112\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.008\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.008 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.008_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "markdown", + "id": "58a14458", + "metadata": {}, + "source": [ + "## Examine blur_sigma" + ] + }, + { + "cell_type": "code", + "execution_count": 188, + "id": "c7c2318d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0050\t0.0073\t0.0050\t0.0055\n", + "weight_shi:\t-0.3869\t0.3100\t0.7499\t0.9321\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4740262206422994\n", + "CNMC 1.9842 +- 0.2291 q0: 1.4232 q10: 1.6950 q20: 1.7851 q30: 1.8500 q40: 1.9214 q50: 1.9744 q60: 2.0301 q70: 2.0950 q80: 2.1712 q90: 2.2769 q100: 3.0240\n", + "one_class_1 2.0169 +- 0.2738 q0: 1.4504 q10: 1.6924 q20: 1.7765 q30: 1.8481 q40: 1.9251 q50: 1.9917 q60: 2.0673 q70: 2.1457 q80: 2.2342 q90: 2.3550 q100: 3.2798\n", + "[one_class_1 CSI 0.4740] [one_class_1 best 0.4740] \n", + "[one_class_mean CSI 0.4740] [one_class_mean best 0.4740] \n", + "0.4740\t0.4740\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 40 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_40.0_resize_factor_0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 189, + "id": "dbd4fb10", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0041\t0.0073\t0.0038\t0.0040\n", + "weight_shi:\t-0.0807\t0.1383\t0.2679\t0.2225\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.3159839323874052\n", + "CNMC 1.9782 +- 0.0390 q0: 1.8641 q10: 1.9344 q20: 1.9491 q30: 1.9576 q40: 1.9654 q50: 1.9735 q60: 1.9830 q70: 1.9942 q80: 2.0052 q90: 2.0239 q100: 2.1760\n", + "one_class_1 2.0111 +- 0.0558 q0: 1.8790 q10: 1.9491 q20: 1.9646 q30: 1.9780 q40: 1.9912 q50: 2.0041 q60: 2.0170 q70: 2.0318 q80: 2.0532 q90: 2.0897 q100: 2.2666\n", + "[one_class_1 CSI 0.3160] [one_class_1 best 0.3160] \n", + "[one_class_mean CSI 0.3160] [one_class_mean best 0.3160] \n", + "0.3160\t0.3160\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 20\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 20 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_20.0_resize_factor_0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 190, + "id": "c0cd8374", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0021\t0.0037\t0.0024\t0.0027\n", + "weight_shi:\t0.1478\t4.1795\t-0.4613\t-0.5806\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.4508957959874011\n", + "CNMC 2.0731 +- 0.5687 q0: 0.4702 q10: 1.3853 q20: 1.5945 q30: 1.7650 q40: 1.9267 q50: 2.0493 q60: 2.1848 q70: 2.3330 q80: 2.5050 q90: 2.7946 q100: 4.6939\n", + "one_class_1 2.1855 +- 0.7534 q0: 0.3032 q10: 1.1734 q20: 1.4954 q30: 1.7768 q40: 1.9835 q50: 2.1717 q60: 2.4165 q70: 2.5852 q80: 2.8103 q90: 3.1495 q100: 4.4871\n", + "[one_class_1 CSI 0.4509] [one_class_1 best 0.4509] \n", + "[one_class_mean CSI 0.4509] [one_class_mean best 0.4509] \n", + "0.4509\t0.4509\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 6\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 6 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_6.0_resize_factor_0.08_color_dist0.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 191, + "id": "1a733a07", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0019\t0.0025\t0.0018\t0.0018\n", + "weight_shi:\t0.1207\t-0.4216\t-0.2927\t-0.2699\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.622416167876928\n", + "CNMC 2.0481 +- 0.2777 q0: 0.5649 q10: 1.7109 q20: 1.8569 q30: 1.9499 q40: 2.0216 q50: 2.0813 q60: 2.1374 q70: 2.2010 q80: 2.2718 q90: 2.3476 q100: 2.6884\n", + "one_class_1 1.8936 +- 0.3857 q0: 0.4436 q10: 1.4038 q20: 1.6226 q30: 1.7768 q40: 1.8682 q50: 1.9483 q60: 2.0252 q70: 2.1209 q80: 2.2012 q90: 2.3215 q100: 2.8144\n", + "[one_class_1 CSI 0.6224] [one_class_1 best 0.6224] \n", + "[one_class_mean CSI 0.6224] [one_class_mean best 0.6224] \n", + "0.6224\t0.6224\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 4\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 4 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma4.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 192, + "id": "c59e2e1d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0024\t0.0049\t0.0029\t0.0029\n", + "weight_shi:\t0.3727\t0.6016\t-2.1896\t-1.0076\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7071838381996982\n", + "CNMC 2.1791 +- 0.2772 q0: 0.2329 q10: 1.8709 q20: 2.0154 q30: 2.0976 q40: 2.1641 q50: 2.2225 q60: 2.2692 q70: 2.3190 q80: 2.3739 q90: 2.4494 q100: 2.9055\n", + "one_class_1 1.9359 +- 0.4103 q0: -0.1517 q10: 1.4452 q20: 1.7034 q30: 1.8312 q40: 1.9334 q50: 2.0115 q60: 2.0642 q70: 2.1519 q80: 2.2408 q90: 2.3584 q100: 2.8261\n", + "[one_class_1 CSI 0.7072] [one_class_1 best 0.7072] \n", + "[one_class_mean CSI 0.7072] [one_class_mean best 0.7072] \n", + "0.7072\t0.7072\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 3 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma3.0_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 193, + "id": "5827615d", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0019\t0.0027\t0.0022\t0.0026\n", + "weight_shi:\t0.1899\t-0.4837\t-0.3535\t-0.3448\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7415028509504856\n", + "CNMC 2.1337 +- 0.1823 q0: 0.9904 q10: 1.9206 q20: 2.0374 q30: 2.0868 q40: 2.1243 q50: 2.1600 q60: 2.1945 q70: 2.2226 q80: 2.2642 q90: 2.3222 q100: 2.5460\n", + "one_class_1 1.9400 +- 0.2874 q0: 0.6323 q10: 1.5817 q20: 1.7870 q30: 1.8866 q40: 1.9416 q50: 1.9843 q60: 2.0207 q70: 2.0829 q80: 2.1566 q90: 2.2468 q100: 2.5500\n", + "[one_class_1 CSI 0.7415] [one_class_1 best 0.7415] \n", + "[one_class_mean CSI 0.7415] [one_class_mean best 0.7415] \n", + "0.7415\t0.7415\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 2 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_2.0_resize_factor_0.08_color_dist0.5_one_class_0_7415/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 194, + "id": "65baeab1", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0022\t0.0033\t0.0025\t0.0029\n", + "weight_shi:\t0.4059\t-6.1160\t-2.6702\t-1.5404\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.7402292913641013\n", + "CNMC 2.5607 +- 0.6482 q0: -0.8214 q10: 1.7859 q20: 2.1154 q30: 2.3402 q40: 2.5031 q50: 2.6399 q60: 2.7654 q70: 2.9084 q80: 3.0413 q90: 3.2796 q100: 4.2054\n", + "one_class_1 1.8328 +- 0.9715 q0: -2.1220 q10: 0.6263 q20: 1.0844 q30: 1.4277 q40: 1.6691 q50: 1.8643 q60: 2.1102 q70: 2.3723 q80: 2.6504 q90: 3.0382 q100: 4.2076\n", + "[one_class_1 CSI 0.7402] [one_class_1 best 0.7402] \n", + "[one_class_mean CSI 0.7402] [one_class_mean best 0.7402] \n", + "0.7402\t0.7402\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.5\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 1.5 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma1.5_one_class_0/last.model\"" + ] + }, + { + "cell_type": "code", + "execution_count": 195, + "id": "a9c1c45f", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Pre-compute global statistics...\n", + "axis size: 3527 3527 3527 3527\n", + "weight_sim:\t0.0077\t0.0092\t0.0076\t0.0081\n", + "weight_shi:\t-0.2259\t0.4304\t0.6270\t0.7623\n", + "Pre-compute features...\n", + "Compute OOD scores... 
(score: CSI)\n", + "One_class_real_mean: 0.36727255694305183\n", + "CNMC 1.9613 +- 0.2117 q0: 1.4830 q10: 1.7342 q20: 1.7989 q30: 1.8500 q40: 1.8941 q50: 1.9317 q60: 1.9759 q70: 2.0278 q80: 2.0943 q90: 2.2144 q100: 3.2689\n", + "one_class_1 2.0967 +- 0.3131 q0: 1.5468 q10: 1.7976 q20: 1.8621 q30: 1.9190 q40: 1.9608 q50: 2.0266 q60: 2.0831 q70: 2.1520 q80: 2.2654 q90: 2.5291 q100: 3.5407\n", + "[one_class_1 CSI 0.3673] [one_class_1 best 0.3673] \n", + "[one_class_mean CSI 0.3673] [one_class_mean best 0.3673] \n", + "0.3673\t0.3673\n" + ] + } + ], + "source": [ + "# EVALUATION\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.0\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0 python3 \"eval.py\" --color_distort 0.5 --resize_factor 0.08 --blur_sigma 1.0 --mode ood_pre --dataset CNMC --model resnet18_imagenet --ood_score CSI --shift_trans_type blur --print_score --save_score --ood_samples 10 --resize_fix --one_class_idx 0 --load_path \"logs/id_all/color_dist0.5/blur/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_resize_factor0.08_color_dist0.5_blur_sigma1.0_one_class_0/last.model\"" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/eval.py b/eval.py new file mode 100644 index 0000000..c77cb12 --- /dev/null +++ b/eval.py @@ -0,0 +1,57 @@ +from common.eval import * + + +def main(): + model.eval() + + if P.mode == 'test_acc': + from evals import test_classifier + with torch.no_grad(): + error = test_classifier(P, model, test_loader, 0, logger=None) + + elif P.mode 
== 'test_marginalized_acc': + from evals import test_classifier + with torch.no_grad(): + error = test_classifier(P, model, test_loader, 0, marginal=True, logger=None) + + elif P.mode in ['ood', 'ood_pre']: + if P.mode == 'ood': + from evals import eval_ood_detection + else: + from evals.ood_pre import eval_ood_detection + + with torch.no_grad(): + auroc_dict = eval_ood_detection(P, model, test_loader, ood_test_loader, P.ood_score, + train_loader=train_loader, simclr_aug=simclr_aug) + + if P.one_class_idx is not None: + mean_dict = dict() + for ood_score in P.ood_score: + mean = 0 + for ood in auroc_dict.keys(): + mean += auroc_dict[ood][ood_score] + mean_dict[ood_score] = mean / len(auroc_dict.keys()) + auroc_dict['one_class_mean'] = mean_dict + + bests = [] + for ood in auroc_dict.keys(): + message = '' + best_auroc = 0 + for ood_score, auroc in auroc_dict[ood].items(): + message += '[%s %s %.4f] ' % (ood, ood_score, auroc) + if auroc > best_auroc: + best_auroc = auroc + message += '[%s %s %.4f] ' % (ood, 'best', best_auroc) + if P.print_score: + print(message) + bests.append(best_auroc) + + bests = map('{:.4f}'.format, bests) + print('\t'.join(bests)) + + else: + raise NotImplementedError() + + +if __name__ == '__main__': + main() diff --git a/evals/__init__.py b/evals/__init__.py new file mode 100644 index 0000000..4cf01c0 --- /dev/null +++ b/evals/__init__.py @@ -0,0 +1 @@ +from evals.evals import test_classifier, eval_ood_detection diff --git a/evals/__pycache__/__init__.cpython-36.pyc b/evals/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..a210afc Binary files /dev/null and b/evals/__pycache__/__init__.cpython-36.pyc differ diff --git a/evals/__pycache__/__init__.cpython-37.pyc b/evals/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..a302e83 Binary files /dev/null and b/evals/__pycache__/__init__.cpython-37.pyc differ diff --git a/evals/__pycache__/evals.cpython-36.pyc b/evals/__pycache__/evals.cpython-36.pyc 
new file mode 100644 index 0000000..8c35bfe Binary files /dev/null and b/evals/__pycache__/evals.cpython-36.pyc differ diff --git a/evals/__pycache__/evals.cpython-37.pyc b/evals/__pycache__/evals.cpython-37.pyc new file mode 100644 index 0000000..cf157ae Binary files /dev/null and b/evals/__pycache__/evals.cpython-37.pyc differ diff --git a/evals/__pycache__/ood_pre.cpython-36.pyc b/evals/__pycache__/ood_pre.cpython-36.pyc new file mode 100644 index 0000000..1d46940 Binary files /dev/null and b/evals/__pycache__/ood_pre.cpython-36.pyc differ diff --git a/evals/__pycache__/ood_pre.cpython-37.pyc b/evals/__pycache__/ood_pre.cpython-37.pyc new file mode 100644 index 0000000..290d3f9 Binary files /dev/null and b/evals/__pycache__/ood_pre.cpython-37.pyc differ diff --git a/evals/evals.py b/evals/evals.py new file mode 100644 index 0000000..ad285a9 --- /dev/null +++ b/evals/evals.py @@ -0,0 +1,201 @@ +import time +import itertools + +import diffdist.functional as distops +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from sklearn.metrics import roc_auc_score + +import models.transform_layers as TL +from utils.temperature_scaling import _ECELoss +from utils.utils import AverageMeter, set_random_seed, normalize + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +ece_criterion = _ECELoss().to(device) + + +def error_k(output, target, ks=(1,)): + """Computes the precision@k for the specified values of k""" + max_k = max(ks) + batch_size = target.size(0) + + _, pred = output.topk(max_k, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + results = [] + for k in ks: + correct_k = correct[:k].view(-1).float().sum(0) + results.append(100.0 - correct_k.mul_(100.0 / batch_size)) + return results + + +def test_classifier(P, model, loader, steps, marginal=False, logger=None): + error_top1 = AverageMeter() + error_calibration = AverageMeter() + 
+ if logger is None: + log_ = print + else: + log_ = logger.log + + # Switch to evaluate mode + mode = model.training + model.eval() + + for n, (images, labels) in enumerate(loader): + batch_size = images.size(0) + + images, labels = images.to(device), labels.to(device) + + if marginal: + outputs = 0 + for i in range(4): + rot_images = torch.rot90(images, i, (2, 3)) + _, outputs_aux = model(rot_images, joint=True) + outputs += outputs_aux['joint'][:, P.n_classes * i: P.n_classes * (i + 1)] / 4. + else: + outputs = model(images) + + top1, = error_k(outputs.data, labels, ks=(1,)) + error_top1.update(top1.item(), batch_size) + + ece = ece_criterion(outputs, labels) * 100 + error_calibration.update(ece.item(), batch_size) + + if n % 100 == 0: + log_('[Test %3d] [Test@1 %.3f] [ECE %.3f]' % + (n, error_top1.value, error_calibration.value)) + + log_(' * [Error@1 %.3f] [ECE %.3f]' % + (error_top1.average, error_calibration.average)) + + if logger is not None: + logger.scalar_summary('eval/clean_error', error_top1.average, steps) + logger.scalar_summary('eval/ece', error_calibration.average, steps) + + model.train(mode) + + return error_top1.average + + +def eval_ood_detection(P, model, id_loader, ood_loaders, ood_scores, train_loader=None, simclr_aug=None): + auroc_dict = dict() + for ood in ood_loaders.keys(): + auroc_dict[ood] = dict() + + for ood_score in ood_scores: + # compute scores for ID and OOD samples + score_func = get_ood_score_func(P, model, ood_score, simclr_aug=simclr_aug) + + save_path = f'plot/score_in_{P.dataset}_{ood_score}' + if P.one_class_idx is not None: + save_path += f'_{P.one_class_idx}' + + scores_id = get_scores(id_loader, score_func) + + if P.save_score: + np.save(f'{save_path}.npy', scores_id) + + for ood, ood_loader in ood_loaders.items(): + if ood == 'interp': + scores_ood = get_scores_interp(id_loader, score_func) + auroc_dict['interp'][ood_score] = get_auroc(scores_id, scores_ood) + else: + scores_ood = get_scores(ood_loader, score_func) + 
auroc_dict[ood][ood_score] = get_auroc(scores_id, scores_ood) + + if P.save_score: + np.save(f'{save_path}_out_{ood}.npy', scores_ood) + + return auroc_dict + + +def get_ood_score_func(P, model, ood_score, simclr_aug=None): + def score_func(x): + return compute_ood_score(P, model, ood_score, x, simclr_aug=simclr_aug) + return score_func + + +def get_scores(loader, score_func): + scores = [] + for i, (x, _) in enumerate(loader): + s = score_func(x.to(device)) + assert s.dim() == 1 and s.size(0) == x.size(0) + + scores.append(s.detach().cpu().numpy()) + return np.concatenate(scores) + + +def get_scores_interp(loader, score_func): + scores = [] + for i, (x, _) in enumerate(loader): + x_interp = (x + last) / 2 if i > 0 else x # omit the first batch, assume batch sizes are equal + last = x # save the last batch + s = score_func(x_interp.to(device)) + assert s.dim() == 1 and s.size(0) == x.size(0) + + scores.append(s.detach().cpu().numpy()) + return np.concatenate(scores) + + +def get_auroc(scores_id, scores_ood): + scores = np.concatenate([scores_id, scores_ood]) + labels = np.concatenate([np.ones_like(scores_id), np.zeros_like(scores_ood)]) + return roc_auc_score(labels, scores) + + +def compute_ood_score(P, model, ood_score, x, simclr_aug=None): + model.eval() + + if ood_score == 'clean_norm': + _, output_aux = model(x, penultimate=True, simclr=True) + score = output_aux[P.ood_layer].norm(dim=1) + return score + + elif ood_score == 'similar': + assert simclr_aug is not None # require custom simclr augmentation + sample_num = 2 # fast evaluation + feats = get_features(model, simclr_aug, x, layer=P.ood_layer, sample_num=sample_num) + feats_avg = sum(feats) / len(feats) + + scores = [] + for seed in range(sample_num): + sim = torch.cosine_similarity(feats[seed], feats_avg) + scores.append(sim) + return sum(scores) / len(scores) + + elif ood_score == 'baseline': + outputs, outputs_aux = model(x, penultimate=True) + scores = F.softmax(outputs, dim=1).max(dim=1)[0] + return 
scores + + elif ood_score == 'baseline_marginalized': + + total_outputs = 0 + for i in range(4): + x_rot = torch.rot90(x, i, (2, 3)) + outputs, outputs_aux = model(x_rot, penultimate=True, joint=True) + total_outputs += outputs_aux['joint'][:, P.n_classes * i:P.n_classes * (i + 1)] + + scores = F.softmax(total_outputs / 4., dim=1).max(dim=1)[0] + return scores + + else: + raise NotImplementedError() + + +def get_features(model, simclr_aug, x, layer='simclr', sample_num=1): + model.eval() + + feats = [] + for seed in range(sample_num): + set_random_seed(seed) + x_t = simclr_aug(x) + with torch.no_grad(): + _, output_aux = model(x_t, penultimate=True, simclr=True, shift=True) + feats.append(output_aux[layer]) + return feats diff --git a/evals/ood_pre.py b/evals/ood_pre.py new file mode 100644 index 0000000..54f5731 --- /dev/null +++ b/evals/ood_pre.py @@ -0,0 +1,242 @@ +import os +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np + +import models.transform_layers as TL +from utils.utils import set_random_seed, normalize +from evals.evals import get_auroc + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +hflip = TL.HorizontalFlipLayer().to(device) + + +def eval_ood_detection(P, model, id_loader, ood_loaders, ood_scores, train_loader=None, simclr_aug=None): + auroc_dict = dict() + for ood in ood_loaders.keys(): + auroc_dict[ood] = dict() + + assert len(ood_scores) == 1 # assume single ood_score for simplicity + ood_score = ood_scores[0] + + base_path = os.path.split(P.load_path)[0] # checkpoint directory + + prefix = f'{P.ood_samples}' + if P.resize_fix: + prefix += f'_resize_fix_{P.resize_factor}' + else: + prefix += f'_resize_range_{P.resize_factor}' + + prefix = os.path.join(base_path, f'feats_{prefix}') + + kwargs = { + 'simclr_aug': simclr_aug, + 'sample_num': P.ood_samples, + 'layers': P.ood_layer, + } + + print('Pre-compute global statistics...') + feats_train = 
get_features(P, f'{P.dataset}_train', model, train_loader, prefix=prefix, **kwargs) # (M, T, d) + + P.axis = [] + for f in feats_train['simclr'].chunk(P.K_shift, dim=1): + axis = f.mean(dim=1) # (M, d) + P.axis.append(normalize(axis, dim=1).to(device)) + print('axis size: ' + ' '.join(map(lambda x: str(len(x)), P.axis))) + + f_sim = [f.mean(dim=1) for f in feats_train['simclr'].chunk(P.K_shift, dim=1)] # list of (M, d) + f_shi = [f.mean(dim=1) for f in feats_train['shift'].chunk(P.K_shift, dim=1)] # list of (M, 4) + + weight_sim = [] + weight_shi = [] + for shi in range(P.K_shift): + sim_norm = f_sim[shi].norm(dim=1) # (M) + shi_mean = f_shi[shi][:, shi] # (M) + weight_sim.append(1 / sim_norm.mean().item()) + weight_shi.append(1 / shi_mean.mean().item()) + + if ood_score == 'simclr': + P.weight_sim = [1] + P.weight_shi = [0] + elif ood_score == 'CSI': + P.weight_sim = weight_sim + P.weight_shi = weight_shi + else: + raise ValueError() + + print(f'weight_sim:\t' + '\t'.join(map('{:.4f}'.format, P.weight_sim))) + print(f'weight_shi:\t' + '\t'.join(map('{:.4f}'.format, P.weight_shi))) + + print('Pre-compute features...') + feats_id = get_features(P, P.dataset, model, id_loader, prefix=prefix, **kwargs) # (N, T, d) + feats_ood = dict() + for ood, ood_loader in ood_loaders.items(): + if ood == 'interp': + feats_ood[ood] = get_features(P, ood, model, id_loader, interp=True, prefix=prefix, **kwargs) + else: + feats_ood[ood] = get_features(P, ood, model, ood_loader, prefix=prefix, **kwargs) + + print(f'Compute OOD scores... 
(score: {ood_score})') + scores_id = get_scores(P, feats_id, ood_score).numpy() + scores_ood = dict() + if P.one_class_idx is not None: + one_class_score = [] + + for ood, feats in feats_ood.items(): + scores_ood[ood] = get_scores(P, feats, ood_score).numpy() + auroc_dict[ood][ood_score] = get_auroc(scores_id, scores_ood[ood]) + if P.one_class_idx is not None: + one_class_score.append(scores_ood[ood]) + + if P.one_class_idx is not None: + one_class_score = np.concatenate(one_class_score) + one_class_total = get_auroc(scores_id, one_class_score) + print(f'One_class_real_mean: {one_class_total}') + + if P.print_score: + print_score(P.dataset, scores_id) + for ood, scores in scores_ood.items(): + print_score(ood, scores) + + return auroc_dict + + +def get_scores(P, feats_dict, ood_score): + # convert to gpu tensor + feats_sim = feats_dict['simclr'].to(device) + feats_shi = feats_dict['shift'].to(device) + N = feats_sim.size(0) + + # compute scores + scores = [] + for f_sim, f_shi in zip(feats_sim, feats_shi): + f_sim = [f.mean(dim=0, keepdim=True) for f in f_sim.chunk(P.K_shift)] # list of (1, d) + f_shi = [f.mean(dim=0, keepdim=True) for f in f_shi.chunk(P.K_shift)] # list of (1, 4) + score = 0 + for shi in range(P.K_shift): + score += (f_sim[shi] * P.axis[shi]).sum(dim=1).max().item() * P.weight_sim[shi] + score += f_shi[shi][:, shi].item() * P.weight_shi[shi] + score = score / P.K_shift + scores.append(score) + scores = torch.tensor(scores) + + assert scores.dim() == 1 and scores.size(0) == N # (N) + return scores.cpu() + + +def get_features(P, data_name, model, loader, interp=False, prefix='', + simclr_aug=None, sample_num=1, layers=('simclr', 'shift')): + + if not isinstance(layers, (list, tuple)): + layers = [layers] + + # load pre-computed features if exists + feats_dict = dict() + # for layer in layers: + # path = prefix + f'_{data_name}_{layer}.pth' + # if os.path.exists(path): + # feats_dict[layer] = torch.load(path) + + # pre-compute features and save to 
the path + left = [layer for layer in layers if layer not in feats_dict.keys()] + if len(left) > 0: + _feats_dict = _get_features(P, model, loader, interp, (P.dataset == 'imagenet' or + P.dataset == 'CNMC' or + P.dataset == 'CNMC_grayscale'), + simclr_aug, sample_num, layers=left) + + for layer, feats in _feats_dict.items(): + path = prefix + f'_{data_name}_{layer}.pth' + torch.save(_feats_dict[layer], path) + feats_dict[layer] = feats # update value + + return feats_dict + + +def _get_features(P, model, loader, interp=False, imagenet=False, simclr_aug=None, + sample_num=1, layers=('simclr', 'shift')): + + if not isinstance(layers, (list, tuple)): + layers = [layers] + + # check if arguments are valid + assert simclr_aug is not None + + if imagenet is True: # assume batch_size = 1 for ImageNet + sample_num = 1 + + # compute features in full dataset + model.eval() + feats_all = {layer: [] for layer in layers} # initialize: empty list + for i, (x, _) in enumerate(loader): + if interp: + x_interp = (x + last) / 2 if i > 0 else x # omit the first batch, assume batch sizes are equal + last = x # save the last batch + x = x_interp # use interp as current batch + + if imagenet is True: + x = torch.cat(x[0], dim=0) # augmented list of x + + x = x.to(device) # gpu tensor + + # compute features in one batch + feats_batch = {layer: [] for layer in layers} # initialize: empty list + for seed in range(sample_num): + set_random_seed(seed) + + if P.K_shift > 1: + x_t = torch.cat([P.shift_trans(hflip(x), k) for k in range(P.K_shift)]) + else: + x_t = x # No shifting: SimCLR + x_t = simclr_aug(x_t) + + # compute augmented features + with torch.no_grad(): + kwargs = {layer: True for layer in layers} # only forward selected layers + _, output_aux = model(x_t, **kwargs) + + # add features in one batch + for layer in layers: + feats = output_aux[layer].cpu() + if imagenet is False: + feats_batch[layer] += feats.chunk(P.K_shift) + else: + feats_batch[layer] += [feats] # (B, d) cpu 
tensor + + # concatenate features in one batch + for key, val in feats_batch.items(): + if imagenet: + feats_batch[key] = torch.stack(val, dim=0) # (B, T, d) + else: + feats_batch[key] = torch.stack(val, dim=1) # (B, T, d) + + # add features in full dataset + for layer in layers: + feats_all[layer] += [feats_batch[layer]] + + # concatenate features in full dataset + for key, val in feats_all.items(): + feats_all[key] = torch.cat(val, dim=0) # (N, T, d) + + # reshape order + if imagenet is False: + # Convert [1,2,3,4, 1,2,3,4] -> [1,1, 2,2, 3,3, 4,4] + for key, val in feats_all.items(): + N, T, d = val.size() # T = K * T' + val = val.view(N, -1, P.K_shift, d) # (N, T', K, d) + val = val.transpose(2, 1) # (N, 4, T', d) + val = val.reshape(N, T, d) # (N, T, d) + feats_all[key] = val + + return feats_all + + +def print_score(data_name, scores): + quantile = np.quantile(scores, np.arange(0, 1.1, 0.1)) + print('{:18s} '.format(data_name) + + '{:.4f} +- {:.4f} '.format(np.mean(scores), np.std(scores)) + + ' '.join(['q{:d}: {:.4f}'.format(i * 10, quantile[i]) for i in range(11)])) + diff --git a/figures/CSI_teaser.png b/figures/CSI_teaser.png new file mode 100644 index 0000000..d73ab3e Binary files /dev/null and b/figures/CSI_teaser.png differ diff --git a/figures/fixed_ood_benchmarks.png b/figures/fixed_ood_benchmarks.png new file mode 100644 index 0000000..e62b1ef Binary files /dev/null and b/figures/fixed_ood_benchmarks.png differ diff --git a/figures/shifting_transformations.png b/figures/shifting_transformations.png new file mode 100644 index 0000000..2544be0 Binary files /dev/null and b/figures/shifting_transformations.png differ diff --git a/main.py b/main.py new file mode 100644 index 0000000..8f17a7b --- /dev/null +++ b/main.py @@ -0,0 +1,37 @@ +from sys import argv +from os import system +from datasets.prepare_data import prep, resize + +import torch +import os +from datasets.postprocess_data import postprocess_data + +DATA_BASE_DIR = 
r'/home/feoktistovar67431/CSI/CSI_local/main.py' +BASE_DIR = '/home/feoktistovar67431/CSI/CSI_local/' + +def main(): + for argument in argv: + if argument == '--proc_step': + proc_step = argv[argv.index(argument)+1] + if proc_step == 'eval': + system("eval.py "+' '.join(argv[1:])) + if proc_step == 'train': + system(BASE_DIR + os.sep + "eval.py " + ' '.join(argv[1:])) + if proc_step == 'plot': + plot_data() + elif proc_step == 'post_proc': + postprocess_data( + [ + r'\CNMC_resnet18_unsup_simclr_CSI_shift_cutperm4_one_class_0\log.txt', + r'\CNMC_resnet18_unsup_simclr_CSI_shift_cutperm4_one_class_0_64px\log.txt', + r'\CNMC_resnet18_unsup_simclr_CSI_shift_cutperm16_one_class_0_32px\log.txt', + r'\CNMC_resnet18_unsup_simclr_CSI_shift_cutperm_one_class_0_64px_batch64\log.txt', + r'\CNMC_resnet18_unsup_simclr_CSI_shift_rotation_one_class_0\log.txt', + r"\CNMC_resnet18_unsup_simclr_CSI_shift_gauss_one_class_0_32px\log.txt" + # r'\cifar10_resnet18_unsup_simclr_CSI_shift_rotation_one_class_1\log.txt' + ] + ) + + +if __name__ == '__main__': + main() diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/__pycache__/__init__.cpython-36.pyc b/models/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..5f40435 Binary files /dev/null and b/models/__pycache__/__init__.cpython-36.pyc differ diff --git a/models/__pycache__/__init__.cpython-37.pyc b/models/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..fdaf78a Binary files /dev/null and b/models/__pycache__/__init__.cpython-37.pyc differ diff --git a/models/__pycache__/base_model.cpython-36.pyc b/models/__pycache__/base_model.cpython-36.pyc new file mode 100644 index 0000000..6d092a3 Binary files /dev/null and b/models/__pycache__/base_model.cpython-36.pyc differ diff --git a/models/__pycache__/base_model.cpython-37.pyc b/models/__pycache__/base_model.cpython-37.pyc new file mode 100644 index 0000000..2126282 Binary files 
/dev/null and b/models/__pycache__/base_model.cpython-37.pyc differ diff --git a/models/__pycache__/classifier.cpython-36.pyc b/models/__pycache__/classifier.cpython-36.pyc new file mode 100644 index 0000000..d88815d Binary files /dev/null and b/models/__pycache__/classifier.cpython-36.pyc differ diff --git a/models/__pycache__/classifier.cpython-37.pyc b/models/__pycache__/classifier.cpython-37.pyc new file mode 100644 index 0000000..e5aad37 Binary files /dev/null and b/models/__pycache__/classifier.cpython-37.pyc differ diff --git a/models/__pycache__/resnet.cpython-36.pyc b/models/__pycache__/resnet.cpython-36.pyc new file mode 100644 index 0000000..756737f Binary files /dev/null and b/models/__pycache__/resnet.cpython-36.pyc differ diff --git a/models/__pycache__/resnet.cpython-37.pyc b/models/__pycache__/resnet.cpython-37.pyc new file mode 100644 index 0000000..39f7ba1 Binary files /dev/null and b/models/__pycache__/resnet.cpython-37.pyc differ diff --git a/models/__pycache__/resnet_imagenet.cpython-36.pyc b/models/__pycache__/resnet_imagenet.cpython-36.pyc new file mode 100644 index 0000000..93e5ddc Binary files /dev/null and b/models/__pycache__/resnet_imagenet.cpython-36.pyc differ diff --git a/models/__pycache__/resnet_imagenet.cpython-37.pyc b/models/__pycache__/resnet_imagenet.cpython-37.pyc new file mode 100644 index 0000000..22c74f7 Binary files /dev/null and b/models/__pycache__/resnet_imagenet.cpython-37.pyc differ diff --git a/models/__pycache__/transform_layers.cpython-36.pyc b/models/__pycache__/transform_layers.cpython-36.pyc new file mode 100644 index 0000000..cb7de85 Binary files /dev/null and b/models/__pycache__/transform_layers.cpython-36.pyc differ diff --git a/models/__pycache__/transform_layers.cpython-37.pyc b/models/__pycache__/transform_layers.cpython-37.pyc new file mode 100644 index 0000000..1d2e793 Binary files /dev/null and b/models/__pycache__/transform_layers.cpython-37.pyc differ diff --git a/models/base_model.py 
b/models/base_model.py new file mode 100644 index 0000000..12cb814 --- /dev/null +++ b/models/base_model.py @@ -0,0 +1,48 @@ +from abc import * +import torch.nn as nn + + +class BaseModel(nn.Module, metaclass=ABCMeta): + def __init__(self, last_dim, num_classes=10, simclr_dim=128): + super(BaseModel, self).__init__() + self.linear = nn.Linear(last_dim, num_classes) + self.simclr_layer = nn.Sequential( + nn.Linear(last_dim, last_dim), + nn.ReLU(), + nn.Linear(last_dim, simclr_dim), + ) + self.shift_cls_layer = nn.Linear(last_dim, 2) + self.joint_distribution_layer = nn.Linear(last_dim, 4 * num_classes) + + @abstractmethod + def penultimate(self, inputs, all_features=False): + pass + + def forward(self, inputs, penultimate=False, simclr=False, shift=False, joint=False): + _aux = {} + _return_aux = False + + features = self.penultimate(inputs) + + output = self.linear(features) + + if penultimate: + _return_aux = True + _aux['penultimate'] = features + + if simclr: + _return_aux = True + _aux['simclr'] = self.simclr_layer(features) + + if shift: + _return_aux = True + _aux['shift'] = self.shift_cls_layer(features) + + if joint: + _return_aux = True + _aux['joint'] = self.joint_distribution_layer(features) + + if _return_aux: + return output, _aux + + return output \ No newline at end of file diff --git a/models/classifier.py b/models/classifier.py new file mode 100644 index 0000000..a1b3e04 --- /dev/null +++ b/models/classifier.py @@ -0,0 +1,135 @@ +import torch.nn as nn + +from models.resnet import ResNet18, ResNet34, ResNet50 +from models.resnet_imagenet import resnet18, resnet50 +import models.transform_layers as TL +from torchvision import transforms + + +def get_simclr_augmentation(P, image_size): + """ + Creates positive data for training. 
+ + :param P: parsed arguments + :param image_size: size of image + :return: transformation + """ + + # parameter for resizecrop + resize_scale = (P.resize_factor, 1.0) # resize scaling factor + if P.resize_fix: # if resize_fix is True, use same scale + resize_scale = (P.resize_factor, P.resize_factor) + + # Align augmentation + s = P.color_distort + color_jitter = TL.ColorJitterLayer(brightness=s*0.8, contrast=s*0.8, saturation=s*0.8, hue=s*0.2, p=0.8) + color_gray = TL.RandomColorGrayLayer(p=0.2) + resize_crop = TL.RandomResizedCropLayer(scale=resize_scale, size=(image_size[0], image_size[1])) + + #v_flip = transforms.RandomVerticalFlip() + #h_flip = transforms.RandomHorizontalFlip() + rand_aff = transforms.RandomAffine(degrees=360, translate=(0.2, 0.2)) + + # Transform define # + if P.dataset == 'imagenet': # Using RandomResizedCrop at PIL transform + transform = nn.Sequential( + color_jitter, + color_gray, + ) + elif P.dataset == 'CNMC': + transform = nn.Sequential( + color_jitter, + color_gray, + resize_crop, + ) + else: + transform = nn.Sequential( + color_jitter, + color_gray, + resize_crop, + ) + + return transform + + +def get_shift_module(P, eval=False): + """ + Creates shift transformation (negative). 
+ + :param P: parsed arguments + :param eval: whether it is an evaluation step or not + :return: transformation + """ + if P.shift_trans_type == 'rotation': + shift_transform = TL.Rotation() + K_shift = 4 + elif P.shift_trans_type == 'cutperm': + shift_transform = TL.CutPerm() + K_shift = 4 + elif P.shift_trans_type == 'noise': + shift_transform = TL.GaussNoise(mean=P.noise_mean, std=P.noise_std) + K_shift = 4 + elif P.shift_trans_type == 'randpers': + shift_transform = TL.RandPers(distortion_scale=P.distortion_scale, p=1) + K_shift = 4 + elif P.shift_trans_type == 'sharp': + shift_transform = TL.RandomAdjustSharpness(sharpness_factor=P.sharpness_factor, p=1) + K_shift = 4 + elif P.shift_trans_type == 'blur': + kernel_size = int(int(P.res.replace('px', ''))*0.1) + if kernel_size%2 == 0: + kernel_size+=1 + sigma = (0.1, float(P.blur_sigma)) + shift_transform = TL.GaussBlur(kernel_size=kernel_size, sigma=sigma) + K_shift = 4 + elif P.shift_trans_type == 'blur_randpers': + kernel_size = int(P.res.replace('px', '')) * 0.1 + sigma = (0.1, float(P.blur_sigma)) + shift_transform = TL.BlurRandpers(kernel_size=kernel_size, sigma=sigma, distortion_scale=P.distortion_scale, p=1) + K_shift = 4 + elif P.shift_trans_type == 'blur_sharp': + kernel_size = int(P.res.replace('px', '')) * 0.1 + sigma = (0.1, float(P.blur_sigma)) + shift_transform = TL.BlurSharpness(kernel_size=kernel_size, sigma=sigma, sharpness_factor=P.sharpness_factor, p=1) + K_shift = 4 + elif P.shift_trans_type == 'randpers_sharp': + shift_transform = TL.RandpersSharpness(distortion_scale=P.distortion_scale, p=1, sharpness_factor=P.sharpness_factor) + K_shift = 4 + elif P.shift_trans_type == 'blur_randpers_sharp': + kernel_size = int(P.res.replace('px', '')) * 0.1 + sigma = (0.1, float(P.blur_sigma)) + shift_transform = TL.BlurRandpersSharpness(kernel_size=kernel_size, sigma=sigma, distortion_scale=P.distortion_scale, p=1, sharpness_factor=P.sharpness_factor) + K_shift = 4 + else: + shift_transform = 
nn.Identity() + K_shift = 1 + + if not eval and not ('sup' in P.mode): + assert P.batch_size == int(128/K_shift) + + return shift_transform, K_shift + + +def get_shift_classifer(model, K_shift): + + model.shift_cls_layer = nn.Linear(model.last_dim, K_shift) + + return model + + +def get_classifier(mode, n_classes=10): + if mode == 'resnet18': + classifier = ResNet18(num_classes=n_classes) + elif mode == 'resnet34': + classifier = ResNet34(num_classes=n_classes) + elif mode == 'resnet50': + classifier = ResNet50(num_classes=n_classes) + elif mode == 'resnet18_imagenet': + classifier = resnet18(num_classes=n_classes) + elif mode == 'resnet50_imagenet': + classifier = resnet50(num_classes=n_classes) + else: + raise NotImplementedError() + + return classifier + diff --git a/models/resnet.py b/models/resnet.py new file mode 100644 index 0000000..1221259 --- /dev/null +++ b/models/resnet.py @@ -0,0 +1,189 @@ +'''ResNet in PyTorch. +BasicBlock and Bottleneck module is from the original ResNet paper: +[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Deep Residual Learning for Image Recognition. arXiv:1512.03385 +PreActBlock and PreActBottleneck module is from the later paper: +[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Identity Mappings in Deep Residual Networks. 
arXiv:1603.05027 +''' +import torch +import torch.nn as nn +import torch.nn.functional as F + +from models.base_model import BaseModel +from models.transform_layers import NormalizeLayer +from torch.nn.utils import spectral_norm + +def conv3x3(in_planes, out_planes, stride=1): + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(in_planes, planes, stride) + self.conv2 = conv3x3(planes, planes) + self.bn1 = nn.BatchNorm2d(planes) + self.bn2 = nn.BatchNorm2d(planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(self.expansion*planes) + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = self.bn2(self.conv2(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class PreActBlock(nn.Module): + '''Pre-activation version of the BasicBlock.''' + expansion = 1 + + def __init__(self, in_planes, planes, stride=1): + super(PreActBlock, self).__init__() + self.conv1 = conv3x3(in_planes, planes, stride) + self.conv2 = conv3x3(planes, planes) + self.bn1 = nn.BatchNorm2d(in_planes) + self.bn2 = nn.BatchNorm2d(planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False) + ) + + def forward(self, x): + out = F.relu(self.bn1(x)) + shortcut = self.shortcut(out) + out = self.conv1(out) + out = self.conv2(F.relu(self.bn2(out))) + out += shortcut + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, in_planes, planes, stride=1): + super(Bottleneck, self).__init__() + self.conv1 
= nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.bn2 = nn.BatchNorm2d(planes) + self.bn3 = nn.BatchNorm2d(self.expansion * planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(self.expansion*planes) + ) + + def forward(self, x): + out = F.relu(self.bn1(self.conv1(x))) + out = F.relu(self.bn2(self.conv2(out))) + out = self.bn3(self.conv3(out)) + out += self.shortcut(x) + out = F.relu(out) + return out + + +class PreActBottleneck(nn.Module): + '''Pre-activation version of the original Bottleneck module.''' + expansion = 4 + + def __init__(self, in_planes, planes, stride=1): + super(PreActBottleneck, self).__init__() + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(in_planes) + self.bn2 = nn.BatchNorm2d(planes) + self.bn3 = nn.BatchNorm2d(planes) + + self.shortcut = nn.Sequential() + if stride != 1 or in_planes != self.expansion*planes: + self.shortcut = nn.Sequential( + nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False) + ) + + def forward(self, x): + out = F.relu(self.bn1(x)) + shortcut = self.shortcut(out) + out = self.conv1(out) + out = self.conv2(F.relu(self.bn2(out))) + out = self.conv3(F.relu(self.bn3(out))) + out += shortcut + return out + + +class ResNet(BaseModel): + def __init__(self, block, num_blocks, num_classes=10): + last_dim = 512 * block.expansion + super(ResNet, 
self).__init__(last_dim, num_classes) + + self.in_planes = 64 + self.last_dim = last_dim + + self.normalize = NormalizeLayer() + + self.conv1 = conv3x3(3, 64) + self.bn1 = nn.BatchNorm2d(64) + + self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) + self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) + self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) + self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) + + def _make_layer(self, block, planes, num_blocks, stride): + strides = [stride] + [1]*(num_blocks-1) + layers = [] + for stride in strides: + layers.append(block(self.in_planes, planes, stride)) + self.in_planes = planes * block.expansion + return nn.Sequential(*layers) + + def penultimate(self, x, all_features=False): + out_list = [] + + out = self.normalize(x) + out = self.conv1(out) + out = self.bn1(out) + out = F.relu(out) + out_list.append(out) + + out = self.layer1(out) + out_list.append(out) + out = self.layer2(out) + out_list.append(out) + out = self.layer3(out) + out_list.append(out) + out = self.layer4(out) + out_list.append(out) + + out = F.avg_pool2d(out, 4) + out = out.view(out.size(0), -1) + + if all_features: + return out, out_list + else: + return out + + +def ResNet18(num_classes): + return ResNet(BasicBlock, [2,2,2,2], num_classes=num_classes) + +def ResNet34(num_classes): + return ResNet(BasicBlock, [3,4,6,3], num_classes=num_classes) + +def ResNet50(num_classes): + return ResNet(Bottleneck, [3,4,6,3], num_classes=num_classes) \ No newline at end of file diff --git a/models/resnet_imagenet.py b/models/resnet_imagenet.py new file mode 100644 index 0000000..4618396 --- /dev/null +++ b/models/resnet_imagenet.py @@ -0,0 +1,231 @@ +import torch +import torch.nn as nn + +from models.base_model import BaseModel +from models.transform_layers import NormalizeLayer + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return 
nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) + # while original implementation places the stride at the first 1x1 convolution(self.conv1) + # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. + # This variant is also known as ResNet V1.5 and improves accuracy according to + # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. 
+ + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(BaseModel): + def __init__(self, block, layers, num_classes=10, + zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None): + last_dim = 512 * block.expansion + super(ResNet, self).__init__(last_dim, num_classes) + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, 
kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.normalize = NormalizeLayer() + self.last_dim = 512 * block.expansion + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def penultimate(self, x, all_features=False): + # See note [TorchScript super()] + out_list = [] + + x = self.normalize(x) + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + out_list.append(x) + + x = self.layer1(x) + out_list.append(x) + x = self.layer2(x) + out_list.append(x) + x = self.layer3(x) + out_list.append(x) + x = self.layer4(x) + out_list.append(x) + + x = self.avgpool(x) + x = torch.flatten(x, 1) + + if all_features: + return x, out_list + else: + return x + + +def _resnet(arch, block, layers, **kwargs): + model = ResNet(block, layers, **kwargs) + return model + + +def resnet18(**kwargs): + r"""ResNet-18 model from + `"Deep Residual Learning for Image Recognition" `_ + """ + return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], **kwargs) + + +def resnet50(**kwargs): + r"""ResNet-50 model from + `"Deep Residual Learning for Image Recognition" `_ + """ + 
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], **kwargs) diff --git a/models/transform_layers.py b/models/transform_layers.py new file mode 100644 index 0000000..0920852 --- /dev/null +++ b/models/transform_layers.py @@ -0,0 +1,643 @@ +import math +import numbers +import numpy as np + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Function +from torchvision import transforms + +if torch.__version__ >= '1.4.0': + kwargs = {'align_corners': False} +else: + kwargs = {} + + +def rgb2hsv(rgb): + """Convert a 4-d RGB tensor to the HSV counterpart. + + Here, we compute hue using atan2() based on the definition in [1], + instead of using the common lookup table approach as in [2, 3]. + Those values agree when the angle is a multiple of 30°, + otherwise they may differ at most ~1.2°. + + References + [1] https://en.wikipedia.org/wiki/Hue + [2] https://www.rapidtables.com/convert/color/rgb-to-hsv.html + [3] https://github.com/scikit-image/scikit-image/blob/master/skimage/color/colorconv.py#L212 + """ + + r, g, b = rgb[:, 0, :, :], rgb[:, 1, :, :], rgb[:, 2, :, :] + + Cmax = rgb.max(1)[0] + Cmin = rgb.min(1)[0] + delta = Cmax - Cmin + + hue = torch.atan2(math.sqrt(3) * (g - b), 2 * r - g - b) + hue = (hue % (2 * math.pi)) / (2 * math.pi) + saturate = delta / Cmax + value = Cmax + hsv = torch.stack([hue, saturate, value], dim=1) + hsv[~torch.isfinite(hsv)] = 0. + return hsv + + +def hsv2rgb(hsv): + """Convert a 4-d HSV tensor to the RGB counterpart. + + >>> %timeit hsv2rgb(hsv) + 2.37 ms ± 13.4 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + >>> %timeit rgb2hsv_fast(rgb) + 298 µs ± 542 ns per loop (mean ± std. dev. 
of 7 runs, 1000 loops each) + >>> torch.allclose(hsv2rgb(hsv), hsv2rgb_fast(hsv), atol=1e-6) + True + + References + [1] https://en.wikipedia.org/wiki/HSL_and_HSV#HSV_to_RGB_alternative + """ + h, s, v = hsv[:, [0]], hsv[:, [1]], hsv[:, [2]] + c = v * s + + n = hsv.new_tensor([5, 3, 1]).view(3, 1, 1) + k = (n + h * 6) % 6 + t = torch.min(k, 4 - k) + t = torch.clamp(t, 0, 1) + + return v - c * t + + +class RandomResizedCropLayer(nn.Module): + def __init__(self, size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.)): + ''' + Inception Crop + size (tuple): size of fowarding image (C, W, H) + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect ratio cropped + ''' + super(RandomResizedCropLayer, self).__init__() + + _eye = torch.eye(2, 3) + self.size = size + self.register_buffer('_eye', _eye) + self.scale = scale + self.ratio = ratio + + def forward(self, inputs, whbias=None): + _device = inputs.device + N = inputs.size(0) + _theta = self._eye.repeat(N, 1, 1) + + if whbias is None: + whbias = self._sample_latent(inputs) + + _theta[:, 0, 0] = whbias[:, 0] + _theta[:, 1, 1] = whbias[:, 1] + _theta[:, 0, 2] = whbias[:, 2] + _theta[:, 1, 2] = whbias[:, 3] + + grid = F.affine_grid(_theta, inputs.size(), **kwargs).to(_device) + output = F.grid_sample(inputs, grid, padding_mode='reflection', **kwargs) + if self.size is not None: + output = F.adaptive_avg_pool2d(output, self.size) + # output = F.adaptive_avg_pool2d(output, self.size) + # output = F.adaptive_avg_pool2d(output, (self.size[0], self.size[1])) + + + return output + + def _clamp(self, whbias): + + w = whbias[:, 0] + h = whbias[:, 1] + w_bias = whbias[:, 2] + h_bias = whbias[:, 3] + + # Clamp with scale + w = torch.clamp(w, *self.scale) + h = torch.clamp(h, *self.scale) + + # Clamp with ratio + w = self.ratio[0] * h + torch.relu(w - self.ratio[0] * h) + w = self.ratio[1] * h - torch.relu(self.ratio[1] * h - w) + + # Clamp with bias range: w_bias \in (w 
- 1, 1 - w), h_bias \in (h - 1, 1 - h) + w_bias = w - 1 + torch.relu(w_bias - w + 1) + w_bias = 1 - w - torch.relu(1 - w - w_bias) + + h_bias = h - 1 + torch.relu(h_bias - h + 1) + h_bias = 1 - h - torch.relu(1 - h - h_bias) + + whbias = torch.stack([w, h, w_bias, h_bias], dim=0).t() + + return whbias + + def _sample_latent(self, inputs): + + _device = inputs.device + N, _, width, height = inputs.shape + + # N * 10 trial + area = width * height + target_area = np.random.uniform(*self.scale, N * 10) * area + log_ratio = (math.log(self.ratio[0]), math.log(self.ratio[1])) + aspect_ratio = np.exp(np.random.uniform(*log_ratio, N * 10)) + + # If doesn't satisfy ratio condition, then do central crop + w = np.round(np.sqrt(target_area * aspect_ratio)) + h = np.round(np.sqrt(target_area / aspect_ratio)) + cond = (0 < w) * (w <= width) * (0 < h) * (h <= height) + w = w[cond] + h = h[cond] + cond_len = w.shape[0] + if cond_len >= N: + w = w[:N] + h = h[:N] + else: + w = np.concatenate([w, np.ones(N - cond_len) * width]) + h = np.concatenate([h, np.ones(N - cond_len) * height]) + + w_bias = np.random.randint(w - width, width - w + 1) / width + h_bias = np.random.randint(h - height, height - h + 1) / height + w = w / width + h = h / height + + whbias = np.column_stack([w, h, w_bias, h_bias]) + whbias = torch.tensor(whbias, device=_device) + + return whbias + + +class HorizontalFlipRandomCrop(nn.Module): + def __init__(self, max_range): + super(HorizontalFlipRandomCrop, self).__init__() + self.max_range = max_range + _eye = torch.eye(2, 3) + self.register_buffer('_eye', _eye) + + def forward(self, input, sign=None, bias=None, rotation=None): + _device = input.device + N = input.size(0) + _theta = self._eye.repeat(N, 1, 1) + + if sign is None: + sign = torch.bernoulli(torch.ones(N, device=_device) * 0.5) * 2 - 1 + if bias is None: + bias = torch.empty((N, 2), device=_device).uniform_(-self.max_range, self.max_range) + _theta[:, 0, 0] = sign + _theta[:, :, 2] = bias + + if 
rotation is not None: + _theta[:, 0:2, 0:2] = rotation + + grid = F.affine_grid(_theta, input.size(), **kwargs).to(_device) + output = F.grid_sample(input, grid, padding_mode='reflection', **kwargs) + + return output + + def _sample_latent(self, N, device=None): + sign = torch.bernoulli(torch.ones(N, device=device) * 0.5) * 2 - 1 + bias = torch.empty((N, 2), device=device).uniform_(-self.max_range, self.max_range) + return sign, bias + + +class Rotation(nn.Module): + def __init__(self, max_range = 4): + super(Rotation, self).__init__() + self.max_range = max_range + self.prob = 0.5 + + def forward(self, input, aug_index=None): + _device = input.device + + _, _, H, W = input.size() + + if aug_index is None: + aug_index = np.random.randint(4) + + output = torch.rot90(input, aug_index, (2, 3)) + + _prob = input.new_full((input.size(0),), self.prob) + _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1) + output = _mask * input + (1-_mask) * output + + else: + aug_index = aug_index % self.max_range + output = torch.rot90(input, aug_index, (2, 3)) + + return output + + +class RandomAdjustSharpness(nn.Module): + def __init__(self, sharpness_factor=0.5, p=0.5): + super(RandomAdjustSharpness, self).__init__() + self.sharpness_factor = sharpness_factor + self.prob = p + + def forward(self, input, aug_index=None): + _device = input.device + + _, _, H, W = input.size() + if aug_index == 0: + output = input + else: + output = transforms.RandomAdjustSharpness(sharpness_factor=self.sharpness_factor, p=self.prob)(input) + + return output + + +class RandPers(nn.Module): + def __init__(self, distortion_scale=0.5, p=0.5): + super(RandPers, self).__init__() + self.distortion_scale = distortion_scale + self.prob = p + + def forward(self, input, aug_index=None): + _device = input.device + + _, _, H, W = input.size() + if aug_index == 0: + output = input + else: + output = transforms.RandomPerspective(distortion_scale=self.distortion_scale, p=self.prob)(input) + + return output + + +class 
GaussBlur(nn.Module): + def __init__(self, max_range = 4, kernel_size=3, sigma=(0.1, 2.0)): + super(GaussBlur, self).__init__() + self.max_range = max_range + self.prob = 0.5 + self.sigma = sigma + self.kernel_size = kernel_size + + def forward(self, input, aug_index=None): + _device = input.device + + _, _, H, W = input.size() + if aug_index is None: + aug_index = np.random.randint(4) + + output = transforms.GaussianBlur(kernel_size=13, sigma=abs(aug_index)+1)(input) + + _prob = input.new_full((input.size(0),), self.prob) + _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1) + output = _mask * input + (1-_mask) * output + + else: + if aug_index == 0: + output = input + else: + output = transforms.GaussianBlur(kernel_size=self.kernel_size, sigma=self.sigma)(input) + + return output + +class GaussNoise(nn.Module): + def __init__(self, mean = 0, std = 1): + super(GaussNoise, self).__init__() + self.mean = mean + self.std = std + + def forward(self, input, aug_index=None): + _device = input.device + + _, _, H, W = input.size() + + if aug_index == 0: + output = input + else: + output = input + (torch.randn(input.size()) * self.std + self.mean).to(_device) + + return output + + +class BlurRandpers(nn.Module): + def __init__(self, max_range=2, kernel_size=3, sigma=(10, 20), distortion_scale=0.6, p=1): + super(BlurRandpers, self).__init__() + self.max_range = max_range + self.sigma = sigma + self.kernel_size = kernel_size + self.distortion_scale = distortion_scale + self.p = p + self.gauss = GaussBlur(kernel_size=self.kernel_size, sigma=self.sigma) + self.randpers = RandPers(distortion_scale=self.distortion_scale, p=self.p) + + def forward(self, input, aug_index=None): + output = self.gauss.forward(input=input, aug_index=aug_index) + output = self.randpers.forward(input=output, aug_index=aug_index) + + return output + + +class BlurSharpness(nn.Module): + def __init__(self, max_range=2, kernel_size=3, sigma=(10, 20), sharpness_factor=0.6, p=1): + super(BlurSharpness, 
self).__init__() + self.max_range = max_range + self.sigma = sigma + self.kernel_size = kernel_size + self.sharpness_factor = sharpness_factor + self.p = p + self.gauss = GaussBlur(kernel_size=self.kernel_size, sigma=self.sigma) + self.sharp = RandomAdjustSharpness(sharpness_factor=self.sharpness_factor, p=self.p) + + def forward(self, input, aug_index=None): + output = self.gauss.forward(input=input, aug_index=aug_index) + output = self.sharp.forward(input=output, aug_index=aug_index) + + return output + + +class RandpersSharpness(nn.Module): + def __init__(self, max_range=2, distortion_scale=0.6, p=1, sharpness_factor=0.6): + super(RandpersSharpness, self).__init__() + self.max_range = max_range + self.distortion_scale = distortion_scale + self.p = p + self.sharpness_factor = sharpness_factor + self.randpers = RandPers(distortion_scale=self.distortion_scale, p=self.p) + self.sharp = RandomAdjustSharpness(sharpness_factor=self.sharpness_factor, p=self.p) + + def forward(self, input, aug_index=None): + output = self.randpers.forward(input=input, aug_index=aug_index) + output = self.sharp.forward(input=output, aug_index=aug_index) + + return output + + +class BlurRandpersSharpness(nn.Module): + def __init__(self, max_range=2, kernel_size=3, sigma=(10, 20), distortion_scale=0.6, p=1, sharpness_factor=0.6): + super(BlurRandpersSharpness, self).__init__() + self.max_range = max_range + self.sigma = sigma + self.kernel_size = kernel_size + self.distortion_scale = distortion_scale + self.p = p + self.sharpness_factor = sharpness_factor + self.gauss = GaussBlur(kernel_size=self.kernel_size, sigma=self.sigma) + self.randpers = RandPers(distortion_scale=self.distortion_scale, p=self.p) + self.sharp = RandomAdjustSharpness(sharpness_factor=self.sharpness_factor, p=self.p) + + def forward(self, input, aug_index=None): + output = self.gauss.forward(input=input, aug_index=aug_index) + output = self.randpers.forward(input=output, aug_index=aug_index) + output = 
self.sharp.forward(input=output, aug_index=aug_index) + + return output + + +class FourCrop(nn.Module): + def __init__(self, max_range = 4): + super(FourCrop, self).__init__() + self.max_range = max_range + self.prob = 0.5 + + def forward(self, inputs): + outputs = inputs + for i in range(8): + outputs[i] = self._crop(inputs.size(), inputs[i], i) + + return outputs + + def _crop(self, size, input, i): + _, _, H, W = size + h_mid = int(H / 2) + w_mid = int(W / 2) + + if i == 0 or i == 4: + corner = input[:, 0:h_mid, 0:w_mid] + elif i == 1 or i == 5: + corner = input[:, 0:h_mid, w_mid:] + elif i == 2 or i == 6: + corner = input[:, h_mid:, 0:w_mid] + elif i == 3 or i == 7: + corner = input[:, h_mid:, w_mid:] + else: + corner = input + corner = transforms.Resize(size=2*h_mid)(corner) + + return corner + + +class CutPerm(nn.Module): + def __init__(self, max_range = 4): + super(CutPerm, self).__init__() + self.max_range = max_range + self.prob = 0.5 + + def forward(self, input, aug_index=None): + _device = input.device + + _, _, H, W = input.size() + + if aug_index is None: + aug_index = np.random.randint(4) + + output = self._cutperm(input, aug_index) + + _prob = input.new_full((input.size(0),), self.prob) + _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1) + output = _mask * input + (1 - _mask) * output + + else: + aug_index = aug_index % self.max_range + output = self._cutperm(input, aug_index) + + return output + + def _cutperm(self, inputs, aug_index): + + _, _, H, W = inputs.size() + h_mid = int(H / 2) + w_mid = int(W / 2) + + jigsaw_h = aug_index // 2 + jigsaw_v = aug_index % 2 + + if jigsaw_h == 1: + inputs = torch.cat((inputs[:, :, h_mid:, :], inputs[:, :, 0:h_mid, :]), dim=2) + if jigsaw_v == 1: + inputs = torch.cat((inputs[:, :, :, w_mid:], inputs[:, :, :, 0:w_mid]), dim=3) + + return inputs + + +def assemble(a, b, c, d): + ab = torch.cat((a, b), dim=2) + cd = torch.cat((c, d), dim=2) + output = torch.cat((ab, cd), dim=3) + + return output + + +def 
quarter(inputs): + _, _, H, W = inputs.size() + h_mid = int(H / 2) + w_mid = int(W / 2) + quarters = [] + quarters.append(inputs[:, :, 0:h_mid, 0:w_mid]) + quarters.append(inputs[:, :, 0:h_mid, w_mid:]) + quarters.append(inputs[:, :, h_mid:, 0:w_mid]) + quarters.append(inputs[:, :, h_mid:, w_mid:]) + + return quarters + + +class HorizontalFlipLayer(nn.Module): + def __init__(self): + """ + img_size : (int, int, int) + Height and width must be powers of 2. E.g. (32, 32, 1) or + (64, 128, 3). Last number indicates number of channels, e.g. 1 for + grayscale or 3 for RGB + """ + super(HorizontalFlipLayer, self).__init__() + + _eye = torch.eye(2, 3) + self.register_buffer('_eye', _eye) + + def forward(self, inputs): + _device = inputs.device + + N = inputs.size(0) + _theta = self._eye.repeat(N, 1, 1) + r_sign = torch.bernoulli(torch.ones(N, device=_device) * 0.5) * 2 - 1 + _theta[:, 0, 0] = r_sign + grid = F.affine_grid(_theta, inputs.size(), **kwargs).to(_device) + inputs = F.grid_sample(inputs, grid, padding_mode='reflection', **kwargs) + + return inputs + + +class RandomColorGrayLayer(nn.Module): + def __init__(self, p): + super(RandomColorGrayLayer, self).__init__() + self.prob = p + + _weight = torch.tensor([[0.299, 0.587, 0.114]]) + self.register_buffer('_weight', _weight.view(1, 3, 1, 1)) + + def forward(self, inputs, aug_index=None): + + if aug_index == 0: + return inputs + + l = F.conv2d(inputs, self._weight) + gray = torch.cat([l, l, l], dim=1) + + if aug_index is None: + _prob = inputs.new_full((inputs.size(0),), self.prob) + _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1) + + gray = inputs * (1 - _mask) + gray * _mask + + return gray + + +class ColorJitterLayer(nn.Module): + def __init__(self, p, brightness, contrast, saturation, hue): + super(ColorJitterLayer, self).__init__() + self.prob = p + self.brightness = self._check_input(brightness, 'brightness') + self.contrast = self._check_input(contrast, 'contrast') + self.saturation = 
self._check_input(saturation, 'saturation') + self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5), + clip_first_on_zero=False) + + def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): + if isinstance(value, numbers.Number): + if value < 0: + raise ValueError("If {} is a single number, it must be non negative.".format(name)) + value = [center - value, center + value] + if clip_first_on_zero: + value[0] = max(value[0], 0) + elif isinstance(value, (tuple, list)) and len(value) == 2: + if not bound[0] <= value[0] <= value[1] <= bound[1]: + raise ValueError("{} values should be between {}".format(name, bound)) + else: + raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name)) + + # if value is 0 or (1., 1.) for brightness/contrast/saturation + # or (0., 0.) for hue, do nothing + if value[0] == value[1] == center: + value = None + return value + + def adjust_contrast(self, x): + if self.contrast: + factor = x.new_empty(x.size(0), 1, 1, 1).uniform_(*self.contrast) + means = torch.mean(x, dim=[2, 3], keepdim=True) + x = (x - means) * factor + means + return torch.clamp(x, 0, 1) + + def adjust_hsv(self, x): + f_h = x.new_zeros(x.size(0), 1, 1) + f_s = x.new_ones(x.size(0), 1, 1) + f_v = x.new_ones(x.size(0), 1, 1) + + if self.hue: + f_h.uniform_(*self.hue) + if self.saturation: + f_s = f_s.uniform_(*self.saturation) + if self.brightness: + f_v = f_v.uniform_(*self.brightness) + + return RandomHSVFunction.apply(x, f_h, f_s, f_v) + + def transform(self, inputs): + # Shuffle transform + if np.random.rand() > 0.5: + transforms = [self.adjust_contrast, self.adjust_hsv] + else: + transforms = [self.adjust_hsv, self.adjust_contrast] + + for t in transforms: + inputs = t(inputs) + + return inputs + + def forward(self, inputs): + _prob = inputs.new_full((inputs.size(0),), self.prob) + _mask = torch.bernoulli(_prob).view(-1, 1, 1, 1) + return inputs * (1 - _mask) + self.transform(inputs) * 
_mask + + +class RandomHSVFunction(Function): + @staticmethod + def forward(ctx, x, f_h, f_s, f_v): + # ctx is a context object that can be used to stash information + # for backward computation + x = rgb2hsv(x) + h = x[:, 0, :, :] + h += (f_h * 255. / 360.) + h = (h % 1) + x[:, 0, :, :] = h + x[:, 1, :, :] = x[:, 1, :, :] * f_s + x[:, 2, :, :] = x[:, 2, :, :] * f_v + x = torch.clamp(x, 0, 1) + x = hsv2rgb(x) + return x + + @staticmethod + def backward(ctx, grad_output): + # We return as many input gradients as there were arguments. + # Gradients of non-Tensor arguments to forward must be None. + grad_input = None + if ctx.needs_input_grad[0]: + grad_input = grad_output.clone() + return grad_input, None, None, None + + +class NormalizeLayer(nn.Module): + """ + In order to certify radii in original coordinates rather than standardized coordinates, we + add the Gaussian noise _before_ standardizing, which is why we have standardization be the first + layer of the classifier rather than as a part of preprocessing as is typical. 
+ """ + + def __init__(self): + super(NormalizeLayer, self).__init__() + + def forward(self, inputs): + return (inputs - 0.5) / 0.5 + diff --git a/train.ipynb b/train.ipynb new file mode 100644 index 0000000..5f16fe9 --- /dev/null +++ b/train.ipynb @@ -0,0 +1,1799 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "c812e9f6", + "metadata": {}, + "outputs": [], + "source": [ + "#!pip3 install --upgrade pip setuptools wheel" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "3c2f5cb0", + "metadata": {}, + "outputs": [], + "source": [ + "!chmod +x eval.py" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9808149e", + "metadata": {}, + "outputs": [], + "source": [ + "#setup\n", + "!git clone https://github.com/NVIDIA/apex\n", + "!cp /home/feoktistovar67431/git/apex/setup.py .\n", + "!pip3 install -v --disable-pip-version-check --no-cache-dir ./\n", + "!pip install git+https://github.com/ildoonet/pytorch-gradual-warmup-lr.git\n", + "!python3 -m pip install torch torchvision scikit-learn tensorboard diffdist==0.1 tensorboardX torchlars==0.1.2 apex" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bf0756e3", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "\n", + "print(f\"Is CUDA supported by this system? 
->{torch.cuda.is_available()}\")\n", + "print(f\"CUDA version: {torch.version.cuda}\")\n", + "cuda_id = torch.cuda.current_device()\n", + "print(f\"ID of current CUDA device: {torch.cuda.current_device()}\")\n", + "print(f\"Number of available devices: {torch.cuda.device_count()}\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5f7ff35c", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "#TEST ONLY\n", + "#!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 '/home/feoktistovar67431/CSI/CSI/train.py' --dataset 'cifar10' --model 'resnet18' --mode simclr_CSI --shift_trans_type rotation --epochs 10 --batch_size 32 --optimizer sgd --one_class_idx 9" + ] + }, + { + "cell_type": "markdown", + "id": "e3f0081b", + "metadata": {}, + "source": [ + "# Combined shiftings" + ] + }, + { + "cell_type": "code", + "execution_count": 222, + "id": "26921f38", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/distributed/launch.py:186: FutureWarning: The module torch.distributed.launch is deprecated\n", + "and will be removed in future. Use torchrun.\n", + "Note that --use_env is set by default in torchrun.\n", + "If your script expects `--local_rank` argument to be set, please\n", + "change it to read from `os.environ['LOCAL_RANK']` instead. See \n", + "https://pytorch.org/docs/stable/distributed.html#launch-utility for \n", + "further instructions\n", + "\n", + " FutureWarning,\n", + "WARNING:torch.distributed.run:\n", + "*****************************************\n", + "Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. 
\n", + "*****************************************\n", + "Warning: using Python fallback for SyncBatchNorm, possibly because apex was installed without --cuda_ext. The exception raised when attempting to import the cuda backend was: No module named 'syncbn'\n", + "Warning: using Python fallback for SyncBatchNorm, possibly because apex was installed without --cuda_ext. The exception raised when attempting to import the cuda backend was: No module named 'syncbn'\n", + "Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten.\n", + "Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten.\n", + "[2022-04-27 21:19:03.912343] Namespace(K_shift=4, batch_size=8, blur_sigma=40.0, color_distort=0.5, dataset='CNMC', distortion_scale=0.8, epochs=10, error_step=5, image_size=(300, 300, 3), load_path=None, local_rank=0, lr_init=0.1, lr_scheduler='cosine', mode='simclr_CSI', model='resnet18_imagenet', multi_gpu=True, n_classes=2, n_gpus=2, n_superclasses=2, no_strict=False, noise_mean=0, noise_std=0.3, one_class_idx=1, ood_batch_size=100, ood_dataset=[0], ood_layer='simclr', ood_samples=1, ood_score=['norm_mean'], optimizer='sgd', print_score=False, proc_step=None, res='450px', resize_factor=0.08, resize_fix=False, resume_path=None, save_score=False, save_step=10, sharpness_factor=2, shift_trans=BlurRandpers(\n", + " (gauss): GaussBlur()\n", + " (randpers): RandPers()\n", + "), shift_trans_type='blur_randpers', sim_lambda=1.0, simclr_dim=128, suffix=None, temperature=0.5, test_batch_size=100, warmup=10, weight_decay=1e-06)\n", + "[2022-04-27 21:19:03.912780] DistributedDataParallel(\n", + " (module): ResNet(\n", + " (linear): Linear(in_features=512, out_features=2, bias=True)\n", + " (simclr_layer): Sequential(\n", + " (0): Linear(in_features=512, out_features=512, bias=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=512, out_features=128, bias=True)\n", + " )\n", + " (shift_cls_layer): 
Linear(in_features=512, out_features=4, bias=True)\n", + " (joint_distribution_layer): Linear(in_features=512, out_features=8, bias=True)\n", + " (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n", + " (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n", + " (layer1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", 
+ " (1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer3): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer4): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n", + " (normalize): NormalizeLayer()\n", + " )\n", + ")\n", + "Epoch 1 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/optim/lr_scheduler.py:154: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n", + " warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n", + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/optim/lr_scheduler.py:154: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. 
Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n", + " warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2022-04-27 21:19:06.681133] [Epoch 1; 0] [Time 1.753] [Data 0.128] [LR 0.10000]\n", + "[LossC 0.000000] [LossSim 4.795710] [LossShift 1.446792]\n", + "[2022-04-27 21:19:26.588634] [Epoch 1; 50] [Time 0.435] [Data 0.827] [LR 0.11004]\n", + "[LossC 0.000000] [LossSim 4.458384] [LossShift 1.450558]\n", + "[2022-04-27 21:19:47.065503] [Epoch 1; 100] [Time 0.441] [Data 0.818] [LR 0.12009]\n", + "[LossC 0.000000] [LossSim 4.495318] [LossShift 0.887940]\n", + "[2022-04-27 21:20:08.001796] [Epoch 1; 150] [Time 0.451] [Data 0.826] [LR 0.13013]\n", + "[LossC 0.000000] [LossSim 4.466498] [LossShift 1.651758]\n", + "[2022-04-27 21:20:29.557696] [Epoch 1; 200] [Time 0.463] [Data 0.859] [LR 0.14018]\n", + "[LossC 0.000000] [LossSim 4.488340] [LossShift 0.890679]\n", + "[2022-04-27 21:20:51.522911] [Epoch 1; 250] [Time 0.465] [Data 0.987] [LR 0.15022]\n", + "[LossC 0.000000] [LossSim 4.457443] [LossShift 1.463503]\n", + "[2022-04-27 21:21:13.774301] [Epoch 1; 300] [Time 0.481] [Data 0.873] [LR 0.16027]\n", + "[LossC 0.000000] [LossSim 4.408203] [LossShift 0.978724]\n", + "[2022-04-27 21:21:36.139558] [Epoch 1; 350] [Time 0.463] [Data 0.896] [LR 0.17031]\n", + "[LossC 0.000000] [LossSim 4.406531] [LossShift 0.853714]\n", + "[2022-04-27 21:21:58.598135] [Epoch 1; 400] [Time 0.469] [Data 0.870] [LR 0.18036]\n", + "[LossC 0.000000] [LossSim 4.494049] [LossShift 0.970959]\n", + "[2022-04-27 21:22:19.114742] [DONE] [Time 0.471] [Data 0.868] [LossC 0.000000] [LossSim 4.517576] [LossShift 1.226323]\n", + "Epoch 2 
(logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:22:20.199138] [Epoch 2; 0] [Time 0.502] [Data 0.158] [LR 0.19000]\n", + "[LossC 0.000000] [LossSim 4.359697] [LossShift 0.896302]\n", + "[2022-04-27 21:22:42.722677] [Epoch 2; 50] [Time 0.452] [Data 0.869] [LR 0.20004]\n", + "[LossC 0.000000] [LossSim 4.424041] [LossShift 0.848778]\n", + "[2022-04-27 21:23:05.591518] [Epoch 2; 100] [Time 0.452] [Data 0.867] [LR 0.21009]\n", + "[LossC 0.000000] [LossSim 4.309733] [LossShift 0.864205]\n", + "[2022-04-27 21:23:28.092864] [Epoch 2; 150] [Time 0.471] [Data 0.871] [LR 0.22013]\n", + "[LossC 0.000000] [LossSim 4.339020] [LossShift 0.861768]\n", + "[2022-04-27 21:23:51.151448] [Epoch 2; 200] [Time 0.471] [Data 0.982] [LR 0.23018]\n", + "[LossC 0.000000] [LossSim 4.398156] [LossShift 0.844045]\n", + "[2022-04-27 21:24:13.759556] [Epoch 2; 250] [Time 0.474] [Data 0.873] [LR 0.24022]\n", + "[LossC 0.000000] [LossSim 4.331997] [LossShift 0.895239]\n", + "[2022-04-27 21:24:36.498251] [Epoch 2; 300] [Time 0.557] [Data 0.844] [LR 0.25027]\n", + "[LossC 0.000000] [LossSim 4.314375] [LossShift 0.844688]\n", + "[2022-04-27 21:24:59.086448] [Epoch 2; 350] [Time 0.448] [Data 0.855] [LR 0.26031]\n", + "[LossC 0.000000] [LossSim 4.494950] [LossShift 0.842451]\n", + "[2022-04-27 21:25:22.358179] [Epoch 2; 400] [Time 0.509] [Data 0.884] [LR 0.27036]\n", + "[LossC 0.000000] [LossSim 4.366556] [LossShift 0.884501]\n", + "[2022-04-27 21:25:43.075378] [DONE] [Time 0.487] [Data 0.907] [LossC 0.000000] [LossSim 4.395404] [LossShift 0.913691]\n", + "Epoch 3 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:25:44.090938] [Epoch 3; 0] [Time 0.461] [Data 0.134] [LR 0.28000]\n", + "[LossC 0.000000] [LossSim 4.363524] [LossShift 0.843010]\n", + "[2022-04-27 21:26:06.906782] [Epoch 3; 50] [Time 0.489] [Data 0.855] [LR 
0.29004]\n", + "[LossC 0.000000] [LossSim 4.475645] [LossShift 1.142160]\n", + "[2022-04-27 21:26:30.509720] [Epoch 3; 100] [Time 0.454] [Data 0.893] [LR 0.30009]\n", + "[LossC 0.000000] [LossSim 4.336016] [LossShift 0.952089]\n", + "[2022-04-27 21:26:53.002780] [Epoch 3; 150] [Time 0.477] [Data 0.860] [LR 0.31013]\n", + "[LossC 0.000000] [LossSim 4.475717] [LossShift 0.875115]\n", + "[2022-04-27 21:27:15.597338] [Epoch 3; 200] [Time 0.471] [Data 0.857] [LR 0.32018]\n", + "[LossC 0.000000] [LossSim 4.349196] [LossShift 0.872518]\n", + "[2022-04-27 21:27:38.345896] [Epoch 3; 250] [Time 0.463] [Data 0.877] [LR 0.33022]\n", + "[LossC 0.000000] [LossSim 4.353239] [LossShift 0.881434]\n", + "[2022-04-27 21:28:01.311768] [Epoch 3; 300] [Time 0.476] [Data 0.876] [LR 0.34027]\n", + "[LossC 0.000000] [LossSim 4.418363] [LossShift 0.876285]\n", + "[2022-04-27 21:28:24.109063] [Epoch 3; 350] [Time 0.529] [Data 0.860] [LR 0.35031]\n", + "[LossC 0.000000] [LossSim 4.391089] [LossShift 0.891998]\n", + "[2022-04-27 21:28:46.767573] [Epoch 3; 400] [Time 0.490] [Data 0.923] [LR 0.36036]\n", + "[LossC 0.000000] [LossSim 4.366334] [LossShift 0.961224]\n", + "[2022-04-27 21:29:07.659288] [DONE] [Time 0.485] [Data 0.909] [LossC 0.000000] [LossSim 4.379301] [LossShift 0.903935]\n", + "Epoch 4 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:29:08.649924] [Epoch 4; 0] [Time 0.441] [Data 0.154] [LR 0.37000]\n", + "[LossC 0.000000] [LossSim 4.468335] [LossShift 0.975977]\n", + "[2022-04-27 21:29:31.468727] [Epoch 4; 50] [Time 0.459] [Data 0.911] [LR 0.38004]\n", + "[LossC 0.000000] [LossSim 4.803634] [LossShift 2.258877]\n", + "[2022-04-27 21:29:53.609175] [Epoch 4; 100] [Time 0.471] [Data 0.855] [LR 0.39009]\n", + "[LossC 0.000000] [LossSim 4.457827] [LossShift 0.855588]\n", + "[2022-04-27 21:30:16.236645] [Epoch 4; 150] [Time 0.472] [Data 0.861] [LR 0.40013]\n", + "[LossC 0.000000] [LossSim 
4.359911] [LossShift 0.869267]\n", + "[2022-04-27 21:30:38.965445] [Epoch 4; 200] [Time 0.457] [Data 0.922] [LR 0.41018]\n", + "[LossC 0.000000] [LossSim 4.300039] [LossShift 0.853143]\n", + "[2022-04-27 21:31:01.744464] [Epoch 4; 250] [Time 0.464] [Data 0.847] [LR 0.42022]\n", + "[LossC 0.000000] [LossSim 4.343868] [LossShift 0.904560]\n", + "[2022-04-27 21:31:24.138632] [Epoch 4; 300] [Time 0.468] [Data 0.929] [LR 0.43027]\n", + "[LossC 0.000000] [LossSim 4.440177] [LossShift 1.008291]\n", + "[2022-04-27 21:31:47.197617] [Epoch 4; 350] [Time 0.459] [Data 0.988] [LR 0.44031]\n", + "[LossC 0.000000] [LossSim 4.313808] [LossShift 0.843529]\n", + "[2022-04-27 21:32:10.020673] [Epoch 4; 400] [Time 0.464] [Data 0.915] [LR 0.45036]\n", + "[LossC 0.000000] [LossSim 4.347077] [LossShift 0.842586]\n", + "[2022-04-27 21:32:30.667648] [DONE] [Time 0.484] [Data 0.903] [LossC 0.000000] [LossSim 4.378773] [LossShift 0.932685]\n", + "Epoch 5 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:32:31.676676] [Epoch 5; 0] [Time 0.472] [Data 0.141] [LR 0.46000]\n", + "[LossC 0.000000] [LossSim 4.296750] [LossShift 0.850581]\n", + "[2022-04-27 21:32:54.231546] [Epoch 5; 50] [Time 0.531] [Data 0.852] [LR 0.47004]\n", + "[LossC 0.000000] [LossSim 4.324140] [LossShift 0.856480]\n", + "[2022-04-27 21:33:16.815921] [Epoch 5; 100] [Time 0.554] [Data 0.887] [LR 0.48009]\n", + "[LossC 0.000000] [LossSim 4.298337] [LossShift 0.911719]\n", + "[2022-04-27 21:33:39.742560] [Epoch 5; 150] [Time 0.513] [Data 0.938] [LR 0.49013]\n", + "[LossC 0.000000] [LossSim 4.311210] [LossShift 0.854077]\n", + "[2022-04-27 21:34:02.227222] [Epoch 5; 200] [Time 0.544] [Data 0.883] [LR 0.50018]\n", + "[LossC 0.000000] [LossSim 4.316729] [LossShift 0.873590]\n", + "[2022-04-27 21:34:25.029707] [Epoch 5; 250] [Time 0.595] [Data 0.907] [LR 0.51022]\n", + "[LossC 0.000000] [LossSim 4.332903] [LossShift 0.852887]\n", + "[2022-04-27 
21:34:47.734705] [Epoch 5; 300] [Time 0.457] [Data 0.884] [LR 0.52027]\n", + "[LossC 0.000000] [LossSim 4.326703] [LossShift 0.827790]\n", + "[2022-04-27 21:35:10.065878] [Epoch 5; 350] [Time 0.480] [Data 0.848] [LR 0.53031]\n", + "[LossC 0.000000] [LossSim 4.629390] [LossShift 0.972859]\n", + "[2022-04-27 21:35:32.496680] [Epoch 5; 400] [Time 0.471] [Data 0.945] [LR 0.54036]\n", + "[LossC 0.000000] [LossSim 4.476654] [LossShift 0.924936]\n", + "[2022-04-27 21:35:53.353584] [DONE] [Time 0.484] [Data 0.901] [LossC 0.000000] [LossSim 4.361738] [LossShift 0.904301]\n", + "Epoch 6 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:35:54.394370] [Epoch 6; 0] [Time 0.459] [Data 0.168] [LR 0.55000]\n", + "[LossC 0.000000] [LossSim 4.356859] [LossShift 0.916392]\n", + "[2022-04-27 21:36:16.884891] [Epoch 6; 50] [Time 0.461] [Data 0.861] [LR 0.56004]\n", + "[LossC 0.000000] [LossSim 4.396854] [LossShift 0.942714]\n", + "[2022-04-27 21:36:39.738454] [Epoch 6; 100] [Time 0.460] [Data 0.898] [LR 0.57009]\n", + "[LossC 0.000000] [LossSim 4.463193] [LossShift 0.884684]\n", + "[2022-04-27 21:37:02.620539] [Epoch 6; 150] [Time 0.467] [Data 0.885] [LR 0.58013]\n", + "[LossC 0.000000] [LossSim 4.373494] [LossShift 0.972907]\n", + "[2022-04-27 21:37:26.181037] [Epoch 6; 200] [Time 0.469] [Data 0.986] [LR 0.59018]\n", + "[LossC 0.000000] [LossSim 4.492169] [LossShift 0.874383]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2022-04-27 21:37:48.941984] [Epoch 6; 250] [Time 0.455] [Data 0.864] [LR 0.60022]\n", + "[LossC 0.000000] [LossSim 4.365623] [LossShift 0.879145]\n", + "[2022-04-27 21:38:11.891998] [Epoch 6; 300] [Time 0.472] [Data 1.195] [LR 0.61027]\n", + "[LossC 0.000000] [LossSim 4.348284] [LossShift 1.021375]\n", + "[2022-04-27 21:38:34.705143] [Epoch 6; 350] [Time 0.536] [Data 0.864] [LR 0.62031]\n", + "[LossC 0.000000] [LossSim 4.290128] [LossShift 
0.857135]\n", + "[2022-04-27 21:38:57.461264] [Epoch 6; 400] [Time 0.467] [Data 0.956] [LR 0.63036]\n", + "[LossC 0.000000] [LossSim 4.288968] [LossShift 0.835112]\n", + "[2022-04-27 21:39:18.226831] [DONE] [Time 0.491] [Data 0.911] [LossC 0.000000] [LossSim 4.369289] [LossShift 0.965370]\n", + "Epoch 7 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:39:19.197901] [Epoch 7; 0] [Time 0.448] [Data 0.145] [LR 0.64000]\n", + "[LossC 0.000000] [LossSim 4.337277] [LossShift 0.845977]\n", + "[2022-04-27 21:39:41.903147] [Epoch 7; 50] [Time 0.516] [Data 0.844] [LR 0.65004]\n", + "[LossC 0.000000] [LossSim 4.348597] [LossShift 0.887782]\n", + "[2022-04-27 21:40:04.761686] [Epoch 7; 100] [Time 0.462] [Data 0.904] [LR 0.66009]\n", + "[LossC 0.000000] [LossSim 4.288217] [LossShift 0.847829]\n", + "[2022-04-27 21:40:27.497629] [Epoch 7; 150] [Time 0.505] [Data 0.909] [LR 0.67013]\n", + "[LossC 0.000000] [LossSim 4.574395] [LossShift 0.856589]\n", + "[2022-04-27 21:40:50.169432] [Epoch 7; 200] [Time 0.503] [Data 0.874] [LR 0.68018]\n", + "[LossC 0.000000] [LossSim 4.347064] [LossShift 1.008280]\n", + "[2022-04-27 21:41:13.461267] [Epoch 7; 250] [Time 0.535] [Data 0.876] [LR 0.69022]\n", + "[LossC 0.000000] [LossSim 4.344507] [LossShift 0.942077]\n", + "[2022-04-27 21:41:36.295103] [Epoch 7; 300] [Time 0.481] [Data 0.856] [LR 0.70027]\n", + "[LossC 0.000000] [LossSim 4.309855] [LossShift 0.832647]\n", + "[2022-04-27 21:41:58.827571] [Epoch 7; 350] [Time 0.464] [Data 0.853] [LR 0.71031]\n", + "[LossC 0.000000] [LossSim 4.432234] [LossShift 1.124480]\n", + "[2022-04-27 21:42:21.525643] [Epoch 7; 400] [Time 0.462] [Data 0.971] [LR 0.72036]\n", + "[LossC 0.000000] [LossSim 4.344445] [LossShift 0.938462]\n", + "[2022-04-27 21:42:42.184827] [DONE] [Time 0.488] [Data 0.907] [LossC 0.000000] [LossSim 4.358003] [LossShift 0.918527]\n", + "Epoch 8 
(logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:42:43.188401] [Epoch 8; 0] [Time 0.472] [Data 0.151] [LR 0.73000]\n", + "[LossC 0.000000] [LossSim 4.423952] [LossShift 0.940491]\n", + "[2022-04-27 21:43:05.626867] [Epoch 8; 50] [Time 0.609] [Data 0.911] [LR 0.74004]\n", + "[LossC 0.000000] [LossSim 4.442121] [LossShift 0.870375]\n", + "[2022-04-27 21:43:28.441870] [Epoch 8; 100] [Time 0.480] [Data 0.858] [LR 0.75009]\n", + "[LossC 0.000000] [LossSim 4.287797] [LossShift 0.879039]\n", + "[2022-04-27 21:43:51.203855] [Epoch 8; 150] [Time 0.464] [Data 1.064] [LR 0.76013]\n", + "[LossC 0.000000] [LossSim 4.277451] [LossShift 0.845034]\n", + "[2022-04-27 21:44:13.634754] [Epoch 8; 200] [Time 0.568] [Data 0.851] [LR 0.77018]\n", + "[LossC 0.000000] [LossSim 4.329644] [LossShift 0.961596]\n", + "[2022-04-27 21:44:36.887687] [Epoch 8; 250] [Time 0.723] [Data 0.942] [LR 0.78022]\n", + "[LossC 0.000000] [LossSim 4.317680] [LossShift 0.864846]\n", + "[2022-04-27 21:44:59.265520] [Epoch 8; 300] [Time 0.450] [Data 0.856] [LR 0.79027]\n", + "[LossC 0.000000] [LossSim 4.362687] [LossShift 0.917989]\n", + "[2022-04-27 21:45:22.337561] [Epoch 8; 350] [Time 0.480] [Data 0.891] [LR 0.80031]\n", + "[LossC 0.000000] [LossSim 4.263648] [LossShift 0.859828]\n", + "[2022-04-27 21:45:45.275990] [Epoch 8; 400] [Time 0.497] [Data 0.868] [LR 0.81036]\n", + "[LossC 0.000000] [LossSim 4.380607] [LossShift 0.836404]\n", + "[2022-04-27 21:46:06.499931] [DONE] [Time 0.488] [Data 0.908] [LossC 0.000000] [LossSim 4.348544] [LossShift 0.891716]\n", + "Epoch 9 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:46:07.537821] [Epoch 9; 0] [Time 0.464] [Data 0.159] [LR 0.82000]\n", + "[LossC 0.000000] [LossSim 4.373352] [LossShift 0.876816]\n", + "[2022-04-27 21:46:30.396968] [Epoch 9; 50] [Time 0.455] [Data 0.856] [LR 
0.83004]\n", + "[LossC 0.000000] [LossSim 4.306937] [LossShift 0.909936]\n", + "[2022-04-27 21:46:53.286257] [Epoch 9; 100] [Time 0.451] [Data 0.855] [LR 0.84009]\n", + "[LossC 0.000000] [LossSim 4.355694] [LossShift 1.014931]\n", + "[2022-04-27 21:47:16.173773] [Epoch 9; 150] [Time 0.465] [Data 1.050] [LR 0.85013]\n", + "[LossC 0.000000] [LossSim 4.293055] [LossShift 0.837927]\n", + "[2022-04-27 21:47:38.465545] [Epoch 9; 200] [Time 0.465] [Data 0.872] [LR 0.86018]\n", + "[LossC 0.000000] [LossSim 4.365509] [LossShift 0.908220]\n", + "[2022-04-27 21:48:01.092709] [Epoch 9; 250] [Time 0.461] [Data 0.937] [LR 0.87022]\n", + "[LossC 0.000000] [LossSim 4.350402] [LossShift 0.842791]\n", + "[2022-04-27 21:48:24.019747] [Epoch 9; 300] [Time 0.472] [Data 0.906] [LR 0.88027]\n", + "[LossC 0.000000] [LossSim 4.499863] [LossShift 1.153011]\n", + "[2022-04-27 21:48:46.872260] [Epoch 9; 350] [Time 0.477] [Data 0.890] [LR 0.89031]\n", + "[LossC 0.000000] [LossSim 4.301045] [LossShift 0.840660]\n", + "[2022-04-27 21:49:09.507846] [Epoch 9; 400] [Time 0.447] [Data 0.851] [LR 0.90036]\n", + "[LossC 0.000000] [LossSim 4.358407] [LossShift 0.889107]\n", + "[2022-04-27 21:49:30.079116] [DONE] [Time 0.485] [Data 0.905] [LossC 0.000000] [LossSim 4.353526] [LossShift 0.893255]\n", + "Epoch 10 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_randpers_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:49:31.077872] [Epoch 10; 0] [Time 0.455] [Data 0.157] [LR 0.91000]\n", + "[LossC 0.000000] [LossSim 4.342908] [LossShift 0.914479]\n", + "[2022-04-27 21:49:53.899316] [Epoch 10; 50] [Time 0.466] [Data 0.991] [LR 0.92004]\n", + "[LossC 0.000000] [LossSim 4.321300] [LossShift 0.815638]\n", + "[2022-04-27 21:50:16.668189] [Epoch 10; 100] [Time 0.497] [Data 0.877] [LR 0.93009]\n", + "[LossC 0.000000] [LossSim 4.261489] [LossShift 0.859249]\n", + "[2022-04-27 21:50:39.620289] [Epoch 10; 150] [Time 0.585] [Data 0.871] [LR 0.94013]\n", + "[LossC 0.000000] [LossSim 
4.288896] [LossShift 0.847932]\n", + "[2022-04-27 21:51:02.703581] [Epoch 10; 200] [Time 0.472] [Data 0.893] [LR 0.95018]\n", + "[LossC 0.000000] [LossSim 4.321000] [LossShift 0.911242]\n", + "[2022-04-27 21:51:25.530056] [Epoch 10; 250] [Time 0.460] [Data 0.888] [LR 0.96022]\n", + "[LossC 0.000000] [LossSim 4.281656] [LossShift 0.857911]\n", + "[2022-04-27 21:51:48.577854] [Epoch 10; 300] [Time 0.594] [Data 0.853] [LR 0.97027]\n", + "[LossC 0.000000] [LossSim 4.266364] [LossShift 0.833280]\n", + "[2022-04-27 21:52:11.521917] [Epoch 10; 350] [Time 0.470] [Data 0.921] [LR 0.98031]\n", + "[LossC 0.000000] [LossSim 4.421701] [LossShift 0.852391]\n", + "[2022-04-27 21:52:34.254971] [Epoch 10; 400] [Time 0.472] [Data 1.054] [LR 0.99036]\n", + "[LossC 0.000000] [LossSim 4.423033] [LossShift 0.933093]\n", + "[2022-04-27 21:52:55.124955] [DONE] [Time 0.491] [Data 0.912] [LossC 0.000000] [LossSim 4.332921] [LossShift 0.889218]\n" + ] + } + ], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --blur_sigma 40 --distortion_scale 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur_randpers --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ec34e63", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/distributed/launch.py:186: FutureWarning: The module torch.distributed.launch is deprecated\n", + "and will be removed in future. 
Use torchrun.\n", + "Note that --use_env is set by default in torchrun.\n", + "If your script expects `--local_rank` argument to be set, please\n", + "change it to read from `os.environ['LOCAL_RANK']` instead. See \n", + "https://pytorch.org/docs/stable/distributed.html#launch-utility for \n", + "further instructions\n", + "\n", + " FutureWarning,\n", + "WARNING:torch.distributed.run:\n", + "*****************************************\n", + "Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed. \n", + "*****************************************\n", + "Warning: using Python fallback for SyncBatchNorm, possibly because apex was installed without --cuda_ext. The exception raised when attempting to import the cuda backend was: No module named 'syncbn'\n", + "Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten.\n", + "Warning: using Python fallback for SyncBatchNorm, possibly because apex was installed without --cuda_ext. The exception raised when attempting to import the cuda backend was: No module named 'syncbn'\n", + "Warning: apex was installed without --cpp_ext. 
Falling back to Python flatten and unflatten.\n", + "[2022-04-27 21:53:02.070110] Namespace(K_shift=4, batch_size=8, blur_sigma=40.0, color_distort=0.5, dataset='CNMC', distortion_scale=0.6, epochs=10, error_step=5, image_size=(300, 300, 3), load_path=None, local_rank=0, lr_init=0.1, lr_scheduler='cosine', mode='simclr_CSI', model='resnet18_imagenet', multi_gpu=True, n_classes=2, n_gpus=2, n_superclasses=2, no_strict=False, noise_mean=0, noise_std=0.3, one_class_idx=1, ood_batch_size=100, ood_dataset=[0], ood_layer='simclr', ood_samples=1, ood_score=['norm_mean'], optimizer='sgd', print_score=False, proc_step=None, res='450px', resize_factor=0.08, resize_fix=False, resume_path=None, save_score=False, save_step=10, sharpness_factor=128.0, shift_trans=BlurSharpness(\n", + " (gauss): GaussBlur()\n", + " (sharp): RandomAdjustSharpness()\n", + "), shift_trans_type='blur_sharp', sim_lambda=1.0, simclr_dim=128, suffix=None, temperature=0.5, test_batch_size=100, warmup=10, weight_decay=1e-06)\n", + "[2022-04-27 21:53:02.070601] DistributedDataParallel(\n", + " (module): ResNet(\n", + " (linear): Linear(in_features=512, out_features=2, bias=True)\n", + " (simclr_layer): Sequential(\n", + " (0): Linear(in_features=512, out_features=512, bias=True)\n", + " (1): ReLU()\n", + " (2): Linear(in_features=512, out_features=128, bias=True)\n", + " )\n", + " (shift_cls_layer): Linear(in_features=512, out_features=4, bias=True)\n", + " (joint_distribution_layer): Linear(in_features=512, out_features=8, bias=True)\n", + " (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n", + " (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n", + " (layer1): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " 
(bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer2): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer3): Sequential(\n", 
+ " (0): BasicBlock(\n", + " (conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (layer4): Sequential(\n", + " (0): BasicBlock(\n", + " (conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n", + " (bn1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (downsample): Sequential(\n", + " (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n", + " (1): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (1): BasicBlock(\n", + " (conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn1): 
SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " (relu): ReLU(inplace=True)\n", + " (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n", + " (bn2): SyncBatchNorm(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", + " )\n", + " )\n", + " (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n", + " (normalize): NormalizeLayer()\n", + " )\n", + ")\n", + "Epoch 1 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/optim/lr_scheduler.py:154: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n", + " warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/home/feoktistovar67431/.local/lib/python3.6/site-packages/torch/optim/lr_scheduler.py:154: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. 
Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n", + " warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n", + "[2022-04-27 21:53:04.749961] [Epoch 1; 0] [Time 1.525] [Data 0.149] [LR 0.10000]\n", + "[LossC 0.000000] [LossSim 4.858340] [LossShift 1.407876]\n", + "[2022-04-27 21:53:25.624987] [Epoch 1; 50] [Time 0.458] [Data 0.878] [LR 0.11004]\n", + "[LossC 0.000000] [LossSim 4.845747] [LossShift 1.667100]\n", + "[2022-04-27 21:53:47.668063] [Epoch 1; 100] [Time 0.474] [Data 0.893] [LR 0.12009]\n", + "[LossC 0.000000] [LossSim 4.844110] [LossShift 1.436306]\n", + "[2022-04-27 21:54:10.188214] [Epoch 1; 150] [Time 0.454] [Data 0.867] [LR 0.13013]\n", + "[LossC 0.000000] [LossSim 4.843646] [LossShift 1.547756]\n", + "[2022-04-27 21:54:33.381892] [Epoch 1; 200] [Time 0.517] [Data 0.932] [LR 0.14018]\n", + "[LossC 0.000000] [LossSim 4.738900] [LossShift 1.359678]\n", + "[2022-04-27 21:54:56.617839] [Epoch 1; 250] [Time 0.469] [Data 1.055] [LR 0.15022]\n", + "[LossC 0.000000] [LossSim 4.796278] [LossShift 1.271640]\n", + "[2022-04-27 21:55:19.371901] [Epoch 1; 300] [Time 0.469] [Data 0.898] [LR 0.16027]\n", + "[LossC 0.000000] [LossSim 4.608876] [LossShift 1.552633]\n", + "[2022-04-27 21:55:42.571197] [Epoch 1; 350] [Time 0.516] [Data 0.918] [LR 0.17031]\n", + "[LossC 0.000000] [LossSim 4.842148] [LossShift 1.336090]\n", + "[2022-04-27 21:56:05.642156] [Epoch 1; 400] [Time 0.523] [Data 0.867] [LR 0.18036]\n", + "[LossC 0.000000] [LossSim 4.832942] [LossShift 1.156906]\n", + "[2022-04-27 21:56:26.681201] [DONE] [Time 0.489] [Data 0.909] [LossC 0.000000] [LossSim 4.770748] [LossShift 1.591873]\n", + "Epoch 2 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:56:27.693232] [Epoch 2; 0] [Time 0.440] [Data 0.148] [LR 0.19000]\n", + "[LossC 0.000000] [LossSim 4.602440] [LossShift 1.091861]\n", + "[2022-04-27 
21:56:50.382773] [Epoch 2; 50] [Time 0.515] [Data 0.877] [LR 0.20004]\n", + "[LossC 0.000000] [LossSim 4.600789] [LossShift 1.042183]\n", + "[2022-04-27 21:57:13.401066] [Epoch 2; 100] [Time 0.472] [Data 0.977] [LR 0.21009]\n", + "[LossC 0.000000] [LossSim 4.711175] [LossShift 1.322048]\n", + "[2022-04-27 21:57:36.339250] [Epoch 2; 150] [Time 0.608] [Data 0.852] [LR 0.22013]\n", + "[LossC 0.000000] [LossSim 4.559575] [LossShift 1.136288]\n", + "[2022-04-27 21:57:59.495503] [Epoch 2; 200] [Time 0.467] [Data 1.097] [LR 0.23018]\n", + "[LossC 0.000000] [LossSim 4.471087] [LossShift 1.055894]\n", + "[2022-04-27 21:58:22.207180] [Epoch 2; 250] [Time 0.498] [Data 0.879] [LR 0.24022]\n", + "[LossC 0.000000] [LossSim 4.526820] [LossShift 0.970052]\n", + "[2022-04-27 21:58:45.158632] [Epoch 2; 300] [Time 0.468] [Data 1.074] [LR 0.25027]\n", + "[LossC 0.000000] [LossSim 4.660821] [LossShift 1.274141]\n", + "[2022-04-27 21:59:08.291492] [Epoch 2; 350] [Time 0.482] [Data 0.860] [LR 0.26031]\n", + "[LossC 0.000000] [LossSim 4.487653] [LossShift 0.929607]\n", + "[2022-04-27 21:59:31.435978] [Epoch 2; 400] [Time 0.469] [Data 1.006] [LR 0.27036]\n", + "[LossC 0.000000] [LossSim 4.729589] [LossShift 1.065959]\n", + "[2022-04-27 21:59:52.467171] [DONE] [Time 0.494] [Data 0.915] [LossC 0.000000] [LossSim 4.540043] [LossShift 1.051491]\n", + "Epoch 3 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 21:59:53.543037] [Epoch 3; 0] [Time 0.515] [Data 0.131] [LR 0.28000]\n", + "[LossC 0.000000] [LossSim 4.606118] [LossShift 1.089750]\n", + "[2022-04-27 22:00:16.551717] [Epoch 3; 50] [Time 0.454] [Data 0.864] [LR 0.29004]\n", + "[LossC 0.000000] [LossSim 4.470480] [LossShift 1.156890]\n", + "[2022-04-27 22:00:39.247741] [Epoch 3; 100] [Time 0.463] [Data 0.960] [LR 0.30009]\n", + "[LossC 0.000000] [LossSim 4.465283] [LossShift 1.034453]\n", + "[2022-04-27 22:01:02.437289] [Epoch 3; 150] [Time 0.485] [Data 
0.857] [LR 0.31013]\n", + "[LossC 0.000000] [LossSim 4.579294] [LossShift 1.223945]\n", + "[2022-04-27 22:01:25.646166] [Epoch 3; 200] [Time 0.458] [Data 0.864] [LR 0.32018]\n", + "[LossC 0.000000] [LossSim 4.475991] [LossShift 0.937372]\n", + "[2022-04-27 22:01:48.449946] [Epoch 3; 250] [Time 0.472] [Data 0.846] [LR 0.33022]\n", + "[LossC 0.000000] [LossSim 4.492799] [LossShift 1.123910]\n", + "[2022-04-27 22:02:11.088044] [Epoch 3; 300] [Time 0.584] [Data 0.884] [LR 0.34027]\n", + "[LossC 0.000000] [LossSim 4.520730] [LossShift 1.016755]\n", + "[2022-04-27 22:02:34.026722] [Epoch 3; 350] [Time 0.462] [Data 0.904] [LR 0.35031]\n", + "[LossC 0.000000] [LossSim 4.588828] [LossShift 1.008489]\n", + "[2022-04-27 22:02:57.093785] [Epoch 3; 400] [Time 0.468] [Data 1.008] [LR 0.36036]\n", + "[LossC 0.000000] [LossSim 4.431605] [LossShift 0.948913]\n", + "[2022-04-27 22:03:18.112107] [DONE] [Time 0.493] [Data 0.914] [LossC 0.000000] [LossSim 4.458634] [LossShift 1.007948]\n", + "Epoch 4 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 22:03:19.173064] [Epoch 4; 0] [Time 0.486] [Data 0.144] [LR 0.37000]\n", + "[LossC 0.000000] [LossSim 4.522823] [LossShift 0.872640]\n", + "[2022-04-27 22:03:41.681406] [Epoch 4; 50] [Time 0.515] [Data 0.965] [LR 0.38004]\n", + "[LossC 0.000000] [LossSim 4.627268] [LossShift 1.079998]\n", + "[2022-04-27 22:04:04.353249] [Epoch 4; 100] [Time 0.456] [Data 0.890] [LR 0.39009]\n", + "[LossC 0.000000] [LossSim 4.401687] [LossShift 1.002750]\n", + "[2022-04-27 22:04:27.711134] [Epoch 4; 150] [Time 0.474] [Data 0.937] [LR 0.40013]\n", + "[LossC 0.000000] [LossSim 4.423962] [LossShift 0.875453]\n", + "[2022-04-27 22:04:50.564132] [Epoch 4; 200] [Time 0.535] [Data 0.917] [LR 0.41018]\n", + "[LossC 0.000000] [LossSim 4.401275] [LossShift 0.953443]\n", + "[2022-04-27 22:05:13.697441] [Epoch 4; 250] [Time 0.459] [Data 0.858] [LR 0.42022]\n", + "[LossC 0.000000] [LossSim 
4.430320] [LossShift 0.948798]\n", + "[2022-04-27 22:05:36.625607] [Epoch 4; 300] [Time 0.475] [Data 0.875] [LR 0.43027]\n", + "[LossC 0.000000] [LossSim 4.321131] [LossShift 0.913674]\n", + "[2022-04-27 22:05:59.610157] [Epoch 4; 350] [Time 0.462] [Data 0.924] [LR 0.44031]\n", + "[LossC 0.000000] [LossSim 4.468315] [LossShift 0.879398]\n", + "[2022-04-27 22:06:22.584148] [Epoch 4; 400] [Time 0.462] [Data 0.924] [LR 0.45036]\n", + "[LossC 0.000000] [LossSim 4.320601] [LossShift 0.835482]\n", + "[2022-04-27 22:06:43.326378] [DONE] [Time 0.492] [Data 0.912] [LossC 0.000000] [LossSim 4.410098] [LossShift 0.938872]\n", + "Epoch 5 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 22:06:44.342767] [Epoch 5; 0] [Time 0.473] [Data 0.149] [LR 0.46000]\n", + "[LossC 0.000000] [LossSim 4.376451] [LossShift 0.939952]\n", + "[2022-04-27 22:07:06.782078] [Epoch 5; 50] [Time 0.449] [Data 0.856] [LR 0.47004]\n", + "[LossC 0.000000] [LossSim 4.396927] [LossShift 0.920150]\n", + "[2022-04-27 22:07:29.728200] [Epoch 5; 100] [Time 0.463] [Data 0.908] [LR 0.48009]\n", + "[LossC 0.000000] [LossSim 4.447166] [LossShift 0.918573]\n", + "[2022-04-27 22:07:52.322851] [Epoch 5; 150] [Time 0.473] [Data 1.023] [LR 0.49013]\n", + "[LossC 0.000000] [LossSim 4.367201] [LossShift 0.944386]\n", + "[2022-04-27 22:08:15.084181] [Epoch 5; 200] [Time 0.466] [Data 0.909] [LR 0.50018]\n", + "[LossC 0.000000] [LossSim 4.325580] [LossShift 0.883697]\n", + "[2022-04-27 22:08:37.787865] [Epoch 5; 250] [Time 0.521] [Data 0.937] [LR 0.51022]\n", + "[LossC 0.000000] [LossSim 4.426981] [LossShift 0.855859]\n", + "[2022-04-27 22:09:00.704213] [Epoch 5; 300] [Time 0.467] [Data 0.885] [LR 0.52027]\n", + "[LossC 0.000000] [LossSim 4.355620] [LossShift 0.837514]\n", + "[2022-04-27 22:09:23.448209] [Epoch 5; 350] [Time 0.482] [Data 0.899] [LR 0.53031]\n", + "[LossC 0.000000] [LossSim 4.432379] [LossShift 0.906252]\n", + "[2022-04-27 
22:09:46.070029] [Epoch 5; 400] [Time 0.542] [Data 0.907] [LR 0.54036]\n", + "[LossC 0.000000] [LossSim 4.362264] [LossShift 0.886713]\n", + "[2022-04-27 22:10:06.772650] [DONE] [Time 0.486] [Data 0.904] [LossC 0.000000] [LossSim 4.392308] [LossShift 0.915971]\n", + "Epoch 6 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 22:10:07.752875] [Epoch 6; 0] [Time 0.446] [Data 0.148] [LR 0.55000]\n", + "[LossC 0.000000] [LossSim 4.358101] [LossShift 0.934794]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[2022-04-27 22:10:30.582189] [Epoch 6; 50] [Time 0.484] [Data 0.911] [LR 0.56004]\n", + "[LossC 0.000000] [LossSim 4.426515] [LossShift 0.982254]\n", + "[2022-04-27 22:10:53.219031] [Epoch 6; 100] [Time 0.596] [Data 0.861] [LR 0.57009]\n", + "[LossC 0.000000] [LossSim 4.355786] [LossShift 0.859021]\n", + "[2022-04-27 22:11:16.124596] [Epoch 6; 150] [Time 0.591] [Data 0.880] [LR 0.58013]\n", + "[LossC 0.000000] [LossSim 4.331424] [LossShift 0.872154]\n", + "[2022-04-27 22:11:38.965621] [Epoch 6; 200] [Time 0.449] [Data 0.886] [LR 0.59018]\n", + "[LossC 0.000000] [LossSim 4.351139] [LossShift 0.876345]\n", + "[2022-04-27 22:12:01.754661] [Epoch 6; 250] [Time 0.461] [Data 0.920] [LR 0.60022]\n", + "[LossC 0.000000] [LossSim 4.491778] [LossShift 1.031505]\n", + "[2022-04-27 22:12:24.410563] [Epoch 6; 300] [Time 0.467] [Data 0.890] [LR 0.61027]\n", + "[LossC 0.000000] [LossSim 4.340865] [LossShift 0.851271]\n", + "[2022-04-27 22:12:47.216964] [Epoch 6; 350] [Time 0.467] [Data 0.897] [LR 0.62031]\n", + "[LossC 0.000000] [LossSim 4.372048] [LossShift 0.921748]\n", + "[2022-04-27 22:13:09.822383] [Epoch 6; 400] [Time 0.469] [Data 0.935] [LR 0.63036]\n", + "[LossC 0.000000] [LossSim 4.349135] [LossShift 0.854723]\n", + "[2022-04-27 22:13:30.781444] [DONE] [Time 0.487] [Data 0.907] [LossC 0.000000] [LossSim 4.368142] [LossShift 0.896633]\n", + "Epoch 7 
(logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 22:13:31.766230] [Epoch 7; 0] [Time 0.455] [Data 0.133] [LR 0.64000]\n", + "[LossC 0.000000] [LossSim 4.423601] [LossShift 0.868863]\n", + "[2022-04-27 22:13:54.496806] [Epoch 7; 50] [Time 0.463] [Data 0.904] [LR 0.65004]\n", + "[LossC 0.000000] [LossSim 4.383883] [LossShift 0.905446]\n", + "[2022-04-27 22:14:17.511831] [Epoch 7; 100] [Time 0.470] [Data 1.031] [LR 0.66009]\n", + "[LossC 0.000000] [LossSim 4.296111] [LossShift 0.895986]\n", + "[2022-04-27 22:14:40.280189] [Epoch 7; 150] [Time 0.477] [Data 0.871] [LR 0.67013]\n", + "[LossC 0.000000] [LossSim 4.305459] [LossShift 0.909102]\n", + "[2022-04-27 22:15:03.937648] [Epoch 7; 200] [Time 0.513] [Data 1.929] [LR 0.68018]\n", + "[LossC 0.000000] [LossSim 4.345171] [LossShift 0.866567]\n", + "[2022-04-27 22:15:26.668402] [Epoch 7; 250] [Time 0.594] [Data 0.859] [LR 0.69022]\n", + "[LossC 0.000000] [LossSim 4.381218] [LossShift 0.895947]\n", + "[2022-04-27 22:15:49.487447] [Epoch 7; 300] [Time 0.473] [Data 0.861] [LR 0.70027]\n", + "[LossC 0.000000] [LossSim 4.351787] [LossShift 0.836976]\n", + "[2022-04-27 22:16:12.051757] [Epoch 7; 350] [Time 0.466] [Data 1.045] [LR 0.71031]\n", + "[LossC 0.000000] [LossSim 4.400456] [LossShift 0.845599]\n", + "[2022-04-27 22:16:34.818097] [Epoch 7; 400] [Time 0.468] [Data 0.849] [LR 0.72036]\n", + "[LossC 0.000000] [LossSim 4.433661] [LossShift 1.035500]\n", + "[2022-04-27 22:16:56.032426] [DONE] [Time 0.491] [Data 0.912] [LossC 0.000000] [LossSim 4.370436] [LossShift 0.907309]\n", + "Epoch 8 (logs/CNMC_resnet18_imagenet_unsup_simclr_CSI_450px_shift_blur_sharp_resize_factor0.08_color_dist0.5_one_class_1)\n", + "[2022-04-27 22:16:57.048328] [Epoch 8; 0] [Time 0.470] [Data 0.160] [LR 0.73000]\n", + "[LossC 0.000000] [LossSim 4.345762] [LossShift 0.854992]\n" + ] + } + ], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + 
"# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 128\n", + "# blur_sigma : 40\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --blur_sigma 40 --sharpness_factor 128 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur_sharp --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cb3bca71", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers_sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 128\n", + "# randpers : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 128 --distortion_scale 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers_sharp --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "baf0eff6", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur_randpers_sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 128\n", + "# blur_sigma : 40\n", + "# randpers : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --blur_sigma 40 --sharpness_factor 128 --distortion_scale 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur_randpers_sharp --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "markdown", + "id": 
"30642f7c", + "metadata": {}, + "source": [ + "# Rotation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d3be9f07", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : rotation\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type rotation --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "markdown", + "id": "d5b3adfc", + "metadata": {}, + "source": [ + "# Cutperm" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2a006f7", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : rotation\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type cutperm --epochs 10 --batch_size 8 --resize_factor 0.08 --optimizer sgd --one_class_idx 1 --res 450px" + ] + }, + { + "cell_type": "markdown", + "id": "dff09fe7", + "metadata": {}, + "source": [ + "# Adjust Sharpness" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "695ed30c", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 4096\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 4096 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 
'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 10 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3537b825", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 2048\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 2048 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a6495274", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 1024\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 1024 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f9a0fe8", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 512\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 512 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 
--batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "44688e2b", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 256\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 256 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e97c21fe", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 150\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 150 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ecf758b", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 140\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 140 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "id": "0d9767a5", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 130\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 130 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bd662097", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 128\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 128 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7c01b6f", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 120\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 120 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0d129e42", + "metadata": {}, + "outputs": 
[], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 100\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 100 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d70d2983", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 80\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 80 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6b32d416", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 64\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 64 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cf996327", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# 
id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 32\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 32 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4d841ffb", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 16\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 16 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fd929ab1", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 8 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e1d33ea1", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# 
color_dist : 0.5\n", + "# sharp : 5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 5 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c1fd73c", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 4\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 4 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9395e2f2", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 3\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 3 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "959cc49f", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : sharp\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# sharp : 2\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m 
torch.distributed.launch --nproc_per_node=2 \"train.py\" --sharpness_factor 2 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type sharp --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "markdown", + "id": "76fd693e", + "metadata": {}, + "source": [ + "# Random Perspective" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6dfe547", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.95\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.95 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 10 --batch_size 8 --optimizer sgd --one_class_idx 0 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ccc4b932", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.9\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.9 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4148f1e6", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + 
"# color_dist : 0.5\n", + "# randper_dist: 0.85\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.85 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "022d5ce0", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.8 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2bec00e6", + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.75\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.75 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1875267e", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 
0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.6\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.6 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a02ed7ec", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : randpers\n", + "# crop : 0.08\n", + "# color_dist : 0.5\n", + "# randper_dist: 0.3\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --distortion_scale 0.3 --resize_factor 0.08 --res 450px --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type randpers --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "markdown", + "id": "d599ef3f", + "metadata": {}, + "source": [ + "## Examine crop" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7195ad51", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.5\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.5 --res 450px --blur_sigma 2 --color_distort 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 10 --batch_size 8 --optimizer sgd --one_class_idx 0 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7401d0e7", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", 
+ "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.3\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.3 --res 450px --blur_sigma 2 --color_distort 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 0 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b88a2670", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.02\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.02 --res 450px --blur_sigma 2 --color_distort 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 0 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "83922b52", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : all\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.008\n", + "# blur_sigma : 2\n", + "# color_dist : 0.8\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.008 --res 450px --blur_sigma 2 --color_distort 0.8 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 0 " + ] + }, + { + "cell_type": "markdown", + "id": "006079f3", + "metadata": {}, + "source": [ + "## Examine blur_sigma" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b65d654", + "metadata": {}, + "outputs": [], + "source": [ + "# 
TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 180\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 180 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 10 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8aa50f84", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 120\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 120 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f94522c3", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 110\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 110 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8bd4c63a", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : 
CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 105\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 105 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cade09f1", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 100\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 100 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0f1af3f1", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 95\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 95 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e5b5e043", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + 
"# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 90\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 90 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4c30628", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 80\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --gauss_sigma 80 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13a022fc", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 60\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 60 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "02779f69", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 
100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 40\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 40 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b63a705a", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 20\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 20 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dde3e377", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 6\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 6 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c23c0e0a", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# 
crop : 0.08\n", + "# blur_sigma : 4\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 4 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "35fbd79f", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 3\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 3 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42510921", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 2\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 2 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7672da24", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1.5\n", + 
"# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 1.5 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e94687e", + "metadata": {}, + "outputs": [], + "source": [ + "# TRAINING\n", + "# dataset : CNMC\n", + "# res : 450px\n", + "# id_class : hem\n", + "# epoch : 100\n", + "# shift_tr : blur\n", + "# crop : 0.08\n", + "# blur_sigma : 1\n", + "# color_dist : 0.5\n", + "!CUDA_VISIBLE_DEVICES=0,1 python3 -m torch.distributed.launch --nproc_per_node=2 \"train.py\" --resize_factor 0.08 --res 450px --blur_sigma 1 --color_distort 0.5 --dataset 'CNMC' --model 'resnet18_imagenet' --mode simclr_CSI --shift_trans_type blur --epochs 100 --batch_size 8 --optimizer sgd --one_class_idx 1 " + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/train.py b/train.py new file mode 100644 index 0000000..3b88fb9 --- /dev/null +++ b/train.py @@ -0,0 +1,57 @@ +from utils.utils import Logger +from utils.utils import save_checkpoint +from utils.utils import save_linear_checkpoint + +from common.train import * +from evals import test_classifier + +if 'sup' in P.mode: + from training.sup import setup +else: + from training.unsup import setup +train, fname = setup(P.mode, P) + +logger = Logger(fname, ask=not resume, local_rank=P.local_rank) +logger.log(P) +logger.log(model) + +if P.multi_gpu: + linear = 
model.module.linear +else: + linear = model.linear +linear_optim = torch.optim.Adam(linear.parameters(), lr=1e-3, betas=(.9, .999), weight_decay=P.weight_decay) + +# Run experiments +for epoch in range(start_epoch, P.epochs + 1): + logger.log_dirname(f"Epoch {epoch}") + model.train() + + if P.multi_gpu: + train_sampler.set_epoch(epoch) + + kwargs = {} + kwargs['linear'] = linear + kwargs['linear_optim'] = linear_optim + kwargs['simclr_aug'] = simclr_aug + + train(P, epoch, model, criterion, optimizer, scheduler_warmup, train_loader, logger=logger, **kwargs) + + model.eval() + + if epoch % P.save_step == 0 and P.local_rank == 0: + if P.multi_gpu: + save_states = model.module.state_dict() + else: + save_states = model.state_dict() + save_checkpoint(epoch, save_states, optimizer.state_dict(), logger.logdir) + save_linear_checkpoint(linear_optim.state_dict(), logger.logdir) + + if epoch % P.error_step == 0 and ('sup' in P.mode): + error = test_classifier(P, model, test_loader, epoch, logger=logger) + + is_best = (best > error) + if is_best: + best = error + + logger.scalar_summary('eval/best_error', best, epoch) + logger.log('[Epoch %3d] [Test %5.2f] [Best %5.2f]' % (epoch, error, best)) diff --git a/training/__init__.py b/training/__init__.py new file mode 100644 index 0000000..409e974 --- /dev/null +++ b/training/__init__.py @@ -0,0 +1,97 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def update_learning_rate(P, optimizer, cur_epoch, n, n_total): + + cur_epoch = cur_epoch - 1 + + lr = P.lr_init + if P.optimizer == 'sgd' or 'lars': + DECAY_RATIO = 0.1 + elif P.optimizer == 'adam': + DECAY_RATIO = 0.3 + else: + raise NotImplementedError() + + if P.warmup > 0: + cur_iter = cur_epoch * n_total + n + if cur_iter <= P.warmup: + lr *= cur_iter / float(P.warmup) + + if cur_epoch >= 0.5 * P.epochs: + lr *= DECAY_RATIO + if cur_epoch >= 0.75 * P.epochs: + lr *= DECAY_RATIO + for param_group in optimizer.param_groups: + param_group['lr'] = lr + 
return lr + + +def _cross_entropy(input, targets, reduction='mean'): + targets_prob = F.softmax(targets, dim=1) + xent = (-targets_prob * F.log_softmax(input, dim=1)).sum(1) + if reduction == 'sum': + return xent.sum() + elif reduction == 'mean': + return xent.mean() + elif reduction == 'none': + return xent + else: + raise NotImplementedError() + + +def _entropy(input, reduction='mean'): + return _cross_entropy(input, input, reduction) + + +def cross_entropy_soft(input, targets, reduction='mean'): + targets_prob = F.softmax(targets, dim=1) + xent = (-targets_prob * F.log_softmax(input, dim=1)).sum(1) + if reduction == 'sum': + return xent.sum() + elif reduction == 'mean': + return xent.mean() + elif reduction == 'none': + return xent + else: + raise NotImplementedError() + + +def kl_div(input, targets, reduction='batchmean'): + return F.kl_div(F.log_softmax(input, dim=1), F.softmax(targets, dim=1), + reduction=reduction) + + +def target_nll_loss(inputs, targets, reduction='none'): + inputs_t = -F.nll_loss(inputs, targets, reduction='none') + logit_diff = inputs - inputs_t.view(-1, 1) + logit_diff = logit_diff.scatter(1, targets.view(-1, 1), -1e8) + diff_max = logit_diff.max(1)[0] + + if reduction == 'sum': + return diff_max.sum() + elif reduction == 'mean': + return diff_max.mean() + elif reduction == 'none': + return diff_max + else: + raise NotImplementedError() + + +def target_nll_c(inputs, targets, reduction='none'): + conf = torch.softmax(inputs, dim=1) + conf_t = -F.nll_loss(conf, targets, reduction='none') + conf_diff = conf - conf_t.view(-1, 1) + conf_diff = conf_diff.scatter(1, targets.view(-1, 1), -1) + diff_max = conf_diff.max(1)[0] + + if reduction == 'sum': + return diff_max.sum() + elif reduction == 'mean': + return diff_max.mean() + elif reduction == 'none': + return diff_max + else: + raise NotImplementedError() \ No newline at end of file diff --git a/training/__pycache__/__init__.cpython-36.pyc b/training/__pycache__/__init__.cpython-36.pyc new 
file mode 100644 index 0000000..19b9651 Binary files /dev/null and b/training/__pycache__/__init__.cpython-36.pyc differ diff --git a/training/__pycache__/__init__.cpython-37.pyc b/training/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..bc3f546 Binary files /dev/null and b/training/__pycache__/__init__.cpython-37.pyc differ diff --git a/training/__pycache__/contrastive_loss.cpython-36.pyc b/training/__pycache__/contrastive_loss.cpython-36.pyc new file mode 100644 index 0000000..1fc09c9 Binary files /dev/null and b/training/__pycache__/contrastive_loss.cpython-36.pyc differ diff --git a/training/__pycache__/contrastive_loss.cpython-37.pyc b/training/__pycache__/contrastive_loss.cpython-37.pyc new file mode 100644 index 0000000..743dad4 Binary files /dev/null and b/training/__pycache__/contrastive_loss.cpython-37.pyc differ diff --git a/training/__pycache__/scheduler.cpython-36.pyc b/training/__pycache__/scheduler.cpython-36.pyc new file mode 100644 index 0000000..5b8ea95 Binary files /dev/null and b/training/__pycache__/scheduler.cpython-36.pyc differ diff --git a/training/__pycache__/scheduler.cpython-37.pyc b/training/__pycache__/scheduler.cpython-37.pyc new file mode 100644 index 0000000..127b1ea Binary files /dev/null and b/training/__pycache__/scheduler.cpython-37.pyc differ diff --git a/training/contrastive_loss.py b/training/contrastive_loss.py new file mode 100644 index 0000000..8697cd5 --- /dev/null +++ b/training/contrastive_loss.py @@ -0,0 +1,79 @@ +import torch +import torch.distributed as dist +import diffdist.functional as distops + + +def get_similarity_matrix(outputs, chunk=2, multi_gpu=False): + ''' + Compute similarity matrix + - outputs: (B', d) tensor for B' = B * chunk + - sim_matrix: (B', B') tensor + ''' + + if multi_gpu: + outputs_gathered = [] + for out in outputs.chunk(chunk): + gather_t = [torch.empty_like(out) for _ in range(dist.get_world_size())] + gather_t = torch.cat(distops.all_gather(gather_t, out)) + 
outputs_gathered.append(gather_t) + outputs = torch.cat(outputs_gathered) + + sim_matrix = torch.mm(outputs, outputs.t()) # (B', d), (d, B') -> (B', B') + + return sim_matrix + + +def NT_xent(sim_matrix, temperature=0.5, chunk=2, eps=1e-8): + ''' + Compute NT_xent loss + - sim_matrix: (B', B') tensor for B' = B * chunk (first 2B are pos samples) + ''' + + device = sim_matrix.device + + B = sim_matrix.size(0) // chunk # B = B' / chunk + + eye = torch.eye(B * chunk).to(device) # (B', B') + sim_matrix = torch.exp(sim_matrix / temperature) * (1 - eye) # remove diagonal + + denom = torch.sum(sim_matrix, dim=1, keepdim=True) + sim_matrix = -torch.log(sim_matrix / (denom + eps) + eps) # loss matrix + + loss = torch.sum(sim_matrix[:B, B:].diag() + sim_matrix[B:, :B].diag()) / (2 * B) + + return loss + + +def Supervised_NT_xent(sim_matrix, labels, temperature=0.5, chunk=2, eps=1e-8, multi_gpu=False): + ''' + Compute NT_xent loss + - sim_matrix: (B', B') tensor for B' = B * chunk (first 2B are pos samples) + ''' + + device = sim_matrix.device + + if multi_gpu: + gather_t = [torch.empty_like(labels) for _ in range(dist.get_world_size())] + labels = torch.cat(distops.all_gather(gather_t, labels)) + labels = labels.repeat(2) + + logits_max, _ = torch.max(sim_matrix, dim=1, keepdim=True) + sim_matrix = sim_matrix - logits_max.detach() + + B = sim_matrix.size(0) // chunk # B = B' / chunk + + eye = torch.eye(B * chunk).to(device) # (B', B') + sim_matrix = torch.exp(sim_matrix / temperature) * (1 - eye) # remove diagonal + + denom = torch.sum(sim_matrix, dim=1, keepdim=True) + sim_matrix = -torch.log(sim_matrix / (denom + eps) + eps) # loss matrix + + labels = labels.contiguous().view(-1, 1) + Mask = torch.eq(labels, labels.t()).float().to(device) + #Mask = eye * torch.stack([labels == labels[i] for i in range(labels.size(0))]).float().to(device) + Mask = Mask / (Mask.sum(dim=1, keepdim=True) + eps) + + loss = torch.sum(Mask * sim_matrix) / (2 * B) + + return loss + diff --git 
a/training/scheduler.py b/training/scheduler.py new file mode 100644 index 0000000..0df7bb9 --- /dev/null +++ b/training/scheduler.py @@ -0,0 +1,63 @@ +from torch.optim.lr_scheduler import _LRScheduler +from torch.optim.lr_scheduler import ReduceLROnPlateau + + +class GradualWarmupScheduler(_LRScheduler): + """ Gradually warm-up(increasing) learning rate in optimizer. + Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'. + + Args: + optimizer (Optimizer): Wrapped optimizer. + multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr. + total_epoch: target learning rate is reached at total_epoch, gradually + after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau) + """ + + def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None): + self.multiplier = multiplier + if self.multiplier < 1.: + raise ValueError('multiplier should be greater thant or equal to 1.') + self.total_epoch = total_epoch + self.after_scheduler = after_scheduler + self.finished = False + super(GradualWarmupScheduler, self).__init__(optimizer) + + def get_lr(self): + if self.last_epoch > self.total_epoch: + if self.after_scheduler: + if not self.finished: + self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs] + self.finished = True + return self.after_scheduler.get_lr() + return [base_lr * self.multiplier for base_lr in self.base_lrs] + + if self.multiplier == 1.0: + return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs] + else: + return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) 
for base_lr in self.base_lrs] + + def step_ReduceLROnPlateau(self, metrics, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + self.last_epoch = epoch if epoch != 0 else 1 # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning + if self.last_epoch <= self.total_epoch: + warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs] + for param_group, lr in zip(self.optimizer.param_groups, warmup_lr): + param_group['lr'] = lr + else: + if epoch is None: + self.after_scheduler.step(metrics, None) + else: + self.after_scheduler.step(metrics, epoch - self.total_epoch) + + def step(self, epoch=None, metrics=None): + if type(self.after_scheduler) != ReduceLROnPlateau: + if self.finished and self.after_scheduler: + if epoch is None: + self.after_scheduler.step(None) + else: + self.after_scheduler.step(epoch - self.total_epoch) + else: + return super(GradualWarmupScheduler, self).step(epoch) + else: + self.step_ReduceLROnPlateau(metrics, epoch) diff --git a/training/sup/__init__.py b/training/sup/__init__.py new file mode 100644 index 0000000..5d7e24c --- /dev/null +++ b/training/sup/__init__.py @@ -0,0 +1,33 @@ +def setup(mode, P): + fname = f'{P.dataset}_{P.model}_{mode}_{P.res}' + + if mode == 'sup_linear': + from .sup_linear import train + elif mode == 'sup_CSI_linear': + from .sup_CSI_linear import train + elif mode == 'sup_simclr': + from .sup_simclr import train + elif mode == 'sup_simclr_CSI': + assert P.batch_size == 32 + # currently only support rotation + from .sup_simclr_CSI import train + else: + raise NotImplementedError() + + if P.suffix is not None: + fname += f'_{P.suffix}' + + return train, fname + + +def update_comp_loss(loss_dict, loss_in, loss_out, loss_diff, batch_size): + loss_dict['pos'].update(loss_in, batch_size) + loss_dict['neg'].update(loss_out, batch_size) + loss_dict['diff'].update(loss_diff, batch_size) + + +def summary_comp_loss(logger, 
tag, loss_dict, epoch): + logger.scalar_summary(f'{tag}/pos', loss_dict['pos'].average, epoch) + logger.scalar_summary(f'{tag}/neg', loss_dict['neg'].average, epoch) + logger.scalar_summary(f'{tag}', loss_dict['diff'].average, epoch) + diff --git a/training/sup/__pycache__/__init__.cpython-36.pyc b/training/sup/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..c260357 Binary files /dev/null and b/training/sup/__pycache__/__init__.cpython-36.pyc differ diff --git a/training/sup/__pycache__/sup_simclr.cpython-36.pyc b/training/sup/__pycache__/sup_simclr.cpython-36.pyc new file mode 100644 index 0000000..e27bfc9 Binary files /dev/null and b/training/sup/__pycache__/sup_simclr.cpython-36.pyc differ diff --git a/training/sup/__pycache__/sup_simclr_CSI.cpython-36.pyc b/training/sup/__pycache__/sup_simclr_CSI.cpython-36.pyc new file mode 100644 index 0000000..16ad464 Binary files /dev/null and b/training/sup/__pycache__/sup_simclr_CSI.cpython-36.pyc differ diff --git a/training/sup/sup_CSI_linear.py b/training/sup/sup_CSI_linear.py new file mode 100644 index 0000000..ef2339e --- /dev/null +++ b/training/sup/sup_CSI_linear.py @@ -0,0 +1,130 @@ +import time + +import torch.optim +import torch.optim.lr_scheduler as lr_scheduler + +import models.transform_layers as TL +from utils.utils import AverageMeter, normalize + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +hflip = TL.HorizontalFlipLayer().to(device) + + +def train(P, epoch, model, criterion, optimizer, scheduler, loader, logger=None, + simclr_aug=None, linear=None, linear_optim=None): + + if P.multi_gpu: + rotation_linear = model.module.shift_cls_layer + joint_linear = model.module.joint_distribution_layer + else: + rotation_linear = model.shift_cls_layer + joint_linear = model.joint_distribution_layer + + if epoch == 1: + # define optimizer and save in P (argument) + milestones = [int(0.6 * P.epochs), int(0.75 * P.epochs), int(0.9 * P.epochs)] + + linear_optim = 
torch.optim.SGD(linear.parameters(), + lr=1e-1, weight_decay=P.weight_decay) + P.linear_optim = linear_optim + P.linear_scheduler = lr_scheduler.MultiStepLR(P.linear_optim, gamma=0.1, milestones=milestones) + + rotation_linear_optim = torch.optim.SGD(rotation_linear.parameters(), + lr=1e-1, weight_decay=P.weight_decay) + P.rotation_linear_optim = rotation_linear_optim + P.rot_scheduler = lr_scheduler.MultiStepLR(P.rotation_linear_optim, gamma=0.1, milestones=milestones) + + joint_linear_optim = torch.optim.SGD(joint_linear.parameters(), + lr=1e-1, weight_decay=P.weight_decay) + P.joint_linear_optim = joint_linear_optim + P.joint_scheduler = lr_scheduler.MultiStepLR(P.joint_linear_optim, gamma=0.1, milestones=milestones) + + if logger is None: + log_ = print + else: + log_ = logger.log + + batch_time = AverageMeter() + data_time = AverageMeter() + + losses = dict() + losses['cls'] = AverageMeter() + losses['rot'] = AverageMeter() + + check = time.time() + for n, (images, labels) in enumerate(loader): + model.eval() + count = n * P.n_gpus # number of trained samples + + data_time.update(time.time() - check) + check = time.time() + + ### SimCLR loss ### + if P.dataset != 'imagenet': + batch_size = images.size(0) + images = images.to(device) + images = hflip(images) # 2B with hflip + else: + batch_size = images[0].size(0) + images = images[0].to(device) + + labels = labels.to(device) + images = torch.cat([torch.rot90(images, rot, (2, 3)) for rot in range(4)]) # 4B + rot_labels = torch.cat([torch.ones_like(labels) * k for k in range(4)], 0) # B -> 4B + joint_labels = torch.cat([labels + P.n_classes * i for i in range(4)], dim=0) + + images = simclr_aug(images) # simclr augmentation + _, outputs_aux = model(images, penultimate=True) + penultimate = outputs_aux['penultimate'].detach() + + outputs = linear(penultimate[0:batch_size]) # only use 0 degree samples for linear eval + outputs_rot = rotation_linear(penultimate) + outputs_joint = joint_linear(penultimate) + + 
loss_ce = criterion(outputs, labels) + loss_rot = criterion(outputs_rot, rot_labels) + loss_joint = criterion(outputs_joint, joint_labels) + + ### CE loss ### + P.linear_optim.zero_grad() + loss_ce.backward() + P.linear_optim.step() + + ### Rot loss ### + P.rotation_linear_optim.zero_grad() + loss_rot.backward() + P.rotation_linear_optim.step() + + ### Joint loss ### + P.joint_linear_optim.zero_grad() + loss_joint.backward() + P.joint_linear_optim.step() + + ### optimizer learning rate ### + lr = P.linear_optim.param_groups[0]['lr'] + + batch_time.update(time.time() - check) + + ### Log losses ### + losses['cls'].update(loss_ce.item(), batch_size) + losses['rot'].update(loss_rot.item(), batch_size) + + if count % 50 == 0: + log_('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [LR %.5f]\n' + '[LossC %f] [LossR %f]' % + (epoch, count, batch_time.value, data_time.value, lr, + losses['cls'].value, losses['rot'].value)) + check = time.time() + + P.linear_scheduler.step() + P.rot_scheduler.step() + P.joint_scheduler.step() + + log_('[DONE] [Time %.3f] [Data %.3f] [LossC %f] [LossR %f]' % + (batch_time.average, data_time.average, + losses['cls'].average, losses['rot'].average)) + + if logger is not None: + logger.scalar_summary('train/loss_cls', losses['cls'].average, epoch) + logger.scalar_summary('train/loss_rot', losses['rot'].average, epoch) + logger.scalar_summary('train/batch_time', batch_time.average, epoch) diff --git a/training/sup/sup_linear.py b/training/sup/sup_linear.py new file mode 100644 index 0000000..7247443 --- /dev/null +++ b/training/sup/sup_linear.py @@ -0,0 +1,91 @@ +import time + +import torch.optim +import torch.optim.lr_scheduler as lr_scheduler + +import models.transform_layers as TL +from utils.utils import AverageMeter, normalize + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +hflip = TL.HorizontalFlipLayer().to(device) + + +def train(P, epoch, model, criterion, optimizer, scheduler, loader, logger=None, + simclr_aug=None, 
linear=None, linear_optim=None): + + if epoch == 1: + # define optimizer and save in P (argument) + milestones = [int(0.6 * P.epochs), int(0.75 * P.epochs), int(0.9 * P.epochs)] + + linear_optim = torch.optim.SGD(linear.parameters(), + lr=1e-1, weight_decay=P.weight_decay) + P.linear_optim = linear_optim + P.linear_scheduler = lr_scheduler.MultiStepLR(P.linear_optim, gamma=0.1, milestones=milestones) + + if logger is None: + log_ = print + else: + log_ = logger.log + + batch_time = AverageMeter() + data_time = AverageMeter() + + losses = dict() + losses['cls'] = AverageMeter() + + check = time.time() + for n, (images, labels) in enumerate(loader): + model.eval() + count = n * P.n_gpus # number of trained samples + + data_time.update(time.time() - check) + check = time.time() + + ### SimCLR loss ### + if P.dataset != 'imagenet': + batch_size = images.size(0) + images = images.to(device) + images = hflip(images) # 2B with hflip + else: + batch_size = images[0].size(0) + images = images[0].to(device) + + labels = labels.to(device) + + images = simclr_aug(images) # simclr augmentation + _, outputs_aux = model(images, penultimate=True) + penultimate = outputs_aux['penultimate'].detach() + + outputs = linear(penultimate[0:batch_size]) # only use 0 degree samples for linear eval + + loss_ce = criterion(outputs, labels) + + ### CE loss ### + P.linear_optim.zero_grad() + loss_ce.backward() + P.linear_optim.step() + + ### optimizer learning rate ### + lr = P.linear_optim.param_groups[0]['lr'] + + batch_time.update(time.time() - check) + + ### Log losses ### + losses['cls'].update(loss_ce.item(), batch_size) + + if count % 50 == 0: + log_('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [LR %.5f]\n' + '[LossC %f]' % + (epoch, count, batch_time.value, data_time.value, lr, + losses['cls'].value, )) + check = time.time() + + P.linear_scheduler.step() + + log_('[DONE] [Time %.3f] [Data %.3f] [LossC %f]' % + (batch_time.average, data_time.average, + losses['cls'].average)) + + if logger 
is not None: + logger.scalar_summary('train/loss_cls', losses['cls'].average, epoch) + logger.scalar_summary('train/batch_time', batch_time.average, epoch) diff --git a/training/sup/sup_simclr.py b/training/sup/sup_simclr.py new file mode 100644 index 0000000..7912698 --- /dev/null +++ b/training/sup/sup_simclr.py @@ -0,0 +1,104 @@ +import time + +import torch.optim + +import models.transform_layers as TL +from training.contrastive_loss import get_similarity_matrix, Supervised_NT_xent +from utils.utils import AverageMeter, normalize + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +hflip = TL.HorizontalFlipLayer().to(device) + + +def train(P, epoch, model, criterion, optimizer, scheduler, loader, logger=None, + simclr_aug=None, linear=None, linear_optim=None): + + assert simclr_aug is not None + assert P.sim_lambda == 1.0 + + if logger is None: + log_ = print + else: + log_ = logger.log + + batch_time = AverageMeter() + data_time = AverageMeter() + + losses = dict() + losses['cls'] = AverageMeter() + losses['sim'] = AverageMeter() + losses['simnorm'] = AverageMeter() + + check = time.time() + for n, (images, labels) in enumerate(loader): + model.train() + count = n * P.n_gpus # number of trained samples + + data_time.update(time.time() - check) + check = time.time() + + ### SimCLR loss ### + if P.dataset != 'imagenet' and P.dataset != 'CNMC' and P.dataset != 'CNMC_grayscale': + batch_size = images.size(0) + images = images.to(device) + images_pair = hflip(images.repeat(2, 1, 1, 1)) # 2B with hflip + else: + batch_size = images[0].size(0) + images1, images2 = images[0].to(device), images[1].to(device) + images_pair = torch.cat([images1, images2], dim=0) # 2B + + labels = labels.to(device) + + images_pair = simclr_aug(images_pair) # simclr augmentation + + _, outputs_aux = model(images_pair, simclr=True, penultimate=True) + + simclr = normalize(outputs_aux['simclr']) # normalize + sim_matrix = get_similarity_matrix(simclr, 
multi_gpu=P.multi_gpu) + loss_sim = Supervised_NT_xent(sim_matrix, labels=labels, temperature=0.07, multi_gpu=P.multi_gpu) * P.sim_lambda + + ### total loss ### + loss = loss_sim + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + scheduler.step(epoch - 1 + n / len(loader)) + lr = optimizer.param_groups[0]['lr'] + + batch_time.update(time.time() - check) + + ### Post-processing stuffs ### + simclr_norm = outputs_aux['simclr'].norm(dim=1).mean() + + ### Linear evaluation ### + outputs_linear_eval = linear(outputs_aux['penultimate'].detach()) + loss_linear = criterion(outputs_linear_eval, labels.repeat(2)) + + linear_optim.zero_grad() + loss_linear.backward() + linear_optim.step() + + ### Log losses ### + losses['cls'].update(0, batch_size) + losses['sim'].update(loss_sim.item(), batch_size) + losses['simnorm'].update(simclr_norm.item(), batch_size) + + if count % 50 == 0: + log_('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [LR %.5f]\n' + '[LossC %f] [LossSim %f] [SimNorm %f]' % + (epoch, count, batch_time.value, data_time.value, lr, + losses['cls'].value, losses['sim'].value, losses['simnorm'].value)) + + check = time.time() + + log_('[DONE] [Time %.3f] [Data %.3f] [LossC %f] [LossSim %f] [SimNorm %f]' % + (batch_time.average, data_time.average, + losses['cls'].average, losses['sim'].average, losses['simnorm'].average)) + + if logger is not None: + logger.scalar_summary('train/loss_cls', losses['cls'].average, epoch) + logger.scalar_summary('train/loss_sim', losses['sim'].average, epoch) + logger.scalar_summary('train/batch_time', batch_time.average, epoch) + logger.scalar_summary('train/simclr_norm', losses['simnorm'].average, epoch) diff --git a/training/sup/sup_simclr_CSI.py b/training/sup/sup_simclr_CSI.py new file mode 100644 index 0000000..d41f714 --- /dev/null +++ b/training/sup/sup_simclr_CSI.py @@ -0,0 +1,111 @@ +import time + +import torch.optim + +import models.transform_layers as TL +from training.contrastive_loss import 
get_similarity_matrix, Supervised_NT_xent +from utils.utils import AverageMeter, normalize + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +hflip = TL.HorizontalFlipLayer().to(device) + + +def train(P, epoch, model, criterion, optimizer, scheduler, loader, logger=None, + simclr_aug=None, linear=None, linear_optim=None): + + # currently only support rotation shifting augmentation + assert simclr_aug is not None + assert P.sim_lambda == 1.0 + + if logger is None: + log_ = print + else: + log_ = logger.log + + batch_time = AverageMeter() + data_time = AverageMeter() + + losses = dict() + losses['cls'] = AverageMeter() + losses['sim'] = AverageMeter() + + check = time.time() + for n, (images, labels) in enumerate(loader): + model.train() + count = n * P.n_gpus # number of trained samples + + data_time.update(time.time() - check) + check = time.time() + + ### SimCLR loss ### + if P.dataset != 'imagenet' and P.dataset != 'CNMC' and P.dataset != 'CNMC_grayscale': + batch_size = images.size(0) + images = images.to(device) + images1, images2 = hflip(images.repeat(2, 1, 1, 1)).chunk(2) # hflip + else: + batch_size = images[0].size(0) + images1, images2 = images[0].to(device), images[1].to(device) + #print("\nImages" + str(images.shape) + "\n") + + images1 = torch.cat([torch.rot90(images1, rot, (2, 3)) for rot in range(4)]) # 4B + images2 = torch.cat([torch.rot90(images2, rot, (2, 3)) for rot in range(4)]) # 4B + images_pair = torch.cat([images1, images2], dim=0) # 8B + + labels = labels.to(device) + rot_sim_labels = torch.cat([labels + P.n_classes * i for i in range(4)], dim=0) + rot_sim_labels = rot_sim_labels.to(device) + + images_pair = simclr_aug(images_pair) # simclr augment + _, outputs_aux = model(images_pair, simclr=True, penultimate=True) + + simclr = normalize(outputs_aux['simclr']) # normalize + sim_matrix = get_similarity_matrix(simclr, multi_gpu=P.multi_gpu) + loss_sim = Supervised_NT_xent(sim_matrix, labels=rot_sim_labels, + 
temperature=0.07, multi_gpu=P.multi_gpu) * P.sim_lambda + + ### total loss ### + loss = loss_sim + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + scheduler.step(epoch - 1 + n / len(loader)) + lr = optimizer.param_groups[0]['lr'] + + batch_time.update(time.time() - check) + + ### Post-processing stuffs ### + penul_1 = outputs_aux['penultimate'][:batch_size] + penul_2 = outputs_aux['penultimate'][4 * batch_size: 5 * batch_size] + outputs_aux['penultimate'] = torch.cat([penul_1, penul_2]) # only use original rotation + + ### Linear evaluation ### + outputs_linear_eval = linear(outputs_aux['penultimate'].detach()) + loss_linear = criterion(outputs_linear_eval, labels.repeat(2)) + + linear_optim.zero_grad() + loss_linear.backward() + linear_optim.step() + + ### Log losses ### + losses['cls'].update(0, batch_size) + losses['sim'].update(loss_sim.item(), batch_size) + + if count % 50 == 0: + log_('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [LR %.5f]\n' + '[LossC %f] [LossSim %f]' % + (epoch, count, batch_time.value, data_time.value, lr, + losses['cls'].value, losses['sim'].value)) + + check = time.time() + + log_('[DONE] [Time %.3f] [Data %.3f] [LossC %f] [LossSim %f]' % + (batch_time.average, data_time.average, + losses['cls'].average, losses['sim'].average)) + + if logger is not None: + logger.scalar_summary('train/loss_cls', losses['cls'].average, epoch) + logger.scalar_summary('train/loss_sim', losses['sim'].average, epoch) + logger.scalar_summary('train/batch_time', batch_time.average, epoch) + diff --git a/training/unsup/__init__.py b/training/unsup/__init__.py new file mode 100644 index 0000000..3979721 --- /dev/null +++ b/training/unsup/__init__.py @@ -0,0 +1,39 @@ +def setup(mode, P): + fname = f'{P.dataset}_{P.model}_unsup_{mode}_{P.res}' + + if mode == 'simclr': + from .simclr import train + elif mode == 'simclr_CSI': + from .simclr_CSI import train + fname += 
f'_shift_{P.shift_trans_type}_resize_factor{P.resize_factor}_color_dist{P.color_distort}' + if P.shift_trans_type == 'gauss': + fname += f'_gauss_sigma{P.gauss_sigma}' + elif P.shift_trans_type == 'randpers': + fname += f'_distortion_scale{P.distortion_scale}' + elif P.shift_trans_type == 'sharp': + fname += f'_sharpness_factor{P.sharpness_factor}' + elif P.shift_trans_type == 'sharp': + fname += f'_nmean_{P.noise_mean}_nstd_{P.noise_std}' + else: + raise NotImplementedError() + + if P.one_class_idx is not None: + fname += f'_one_class_{P.one_class_idx}' + + if P.suffix is not None: + fname += f'_{P.suffix}' + + return train, fname + + +def update_comp_loss(loss_dict, loss_in, loss_out, loss_diff, batch_size): + loss_dict['pos'].update(loss_in, batch_size) + loss_dict['neg'].update(loss_out, batch_size) + loss_dict['diff'].update(loss_diff, batch_size) + + +def summary_comp_loss(logger, tag, loss_dict, epoch): + logger.scalar_summary(f'{tag}/pos', loss_dict['pos'].average, epoch) + logger.scalar_summary(f'{tag}/neg', loss_dict['neg'].average, epoch) + logger.scalar_summary(f'{tag}', loss_dict['diff'].average, epoch) + diff --git a/training/unsup/__pycache__/__init__.cpython-36.pyc b/training/unsup/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..32fdd4b Binary files /dev/null and b/training/unsup/__pycache__/__init__.cpython-36.pyc differ diff --git a/training/unsup/__pycache__/__init__.cpython-37.pyc b/training/unsup/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 0000000..9cb9326 Binary files /dev/null and b/training/unsup/__pycache__/__init__.cpython-37.pyc differ diff --git a/training/unsup/__pycache__/simclr_CSI.cpython-36.pyc b/training/unsup/__pycache__/simclr_CSI.cpython-36.pyc new file mode 100644 index 0000000..2a0a2f1 Binary files /dev/null and b/training/unsup/__pycache__/simclr_CSI.cpython-36.pyc differ diff --git a/training/unsup/__pycache__/simclr_CSI.cpython-37.pyc 
b/training/unsup/__pycache__/simclr_CSI.cpython-37.pyc new file mode 100644 index 0000000..5215cf1 Binary files /dev/null and b/training/unsup/__pycache__/simclr_CSI.cpython-37.pyc differ diff --git a/training/unsup/__pycache__/simclr_CSI.cpython-37.pyc.2078473038560 b/training/unsup/__pycache__/simclr_CSI.cpython-37.pyc.2078473038560 new file mode 100644 index 0000000..e69de29 diff --git a/training/unsup/simclr.py b/training/unsup/simclr.py new file mode 100644 index 0000000..bd0c7b9 --- /dev/null +++ b/training/unsup/simclr.py @@ -0,0 +1,101 @@ +import time + +import torch.optim + +import models.transform_layers as TL +from training.contrastive_loss import get_similarity_matrix, NT_xent +from utils.utils import AverageMeter, normalize + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +hflip = TL.HorizontalFlipLayer().to(device) + + +def train(P, epoch, model, criterion, optimizer, scheduler, loader, logger=None, + simclr_aug=None, linear=None, linear_optim=None): + + assert simclr_aug is not None + assert P.sim_lambda == 1.0 + + if logger is None: + log_ = print + else: + log_ = logger.log + + batch_time = AverageMeter() + data_time = AverageMeter() + + losses = dict() + losses['cls'] = AverageMeter() + losses['sim'] = AverageMeter() + + check = time.time() + for n, (images, labels) in enumerate(loader): + model.train() + count = n * P.n_gpus # number of trained samples + + data_time.update(time.time() - check) + check = time.time() + + ### SimCLR loss ### + if P.dataset != 'imagenet': + batch_size = images.size(0) + images = images.to(device) + images_pair = hflip(images.repeat(2, 1, 1, 1)) # 2B with hflip + else: + batch_size = images[0].size(0) + images1, images2 = images[0].to(device), images[1].to(device) + images_pair = torch.cat([images1, images2], dim=0) # 2B + + labels = labels.to(device) + + images_pair = simclr_aug(images_pair) # transform + + _, outputs_aux = model(images_pair, simclr=True, penultimate=True) + + simclr = 
normalize(outputs_aux['simclr']) # normalize + sim_matrix = get_similarity_matrix(simclr, multi_gpu=P.multi_gpu) + loss_sim = NT_xent(sim_matrix, temperature=0.5) * P.sim_lambda + + ### total loss ### + loss = loss_sim + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + scheduler.step(epoch - 1 + n / len(loader)) + lr = optimizer.param_groups[0]['lr'] + + batch_time.update(time.time() - check) + + ### Post-processing stuffs ### + simclr_norm = outputs_aux['simclr'].norm(dim=1).mean() + + ### Linear evaluation ### + outputs_linear_eval = linear(outputs_aux['penultimate'].detach()) + loss_linear = criterion(outputs_linear_eval, labels.repeat(2)) + + linear_optim.zero_grad() + loss_linear.backward() + linear_optim.step() + + ### Log losses ### + losses['cls'].update(0, batch_size) + losses['sim'].update(loss_sim.item(), batch_size) + + if count % 50 == 0: + log_('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [LR %.5f]\n' + '[LossC %f] [LossSim %f]' % + (epoch, count, batch_time.value, data_time.value, lr, + losses['cls'].value, losses['sim'].value)) + + check = time.time() + + log_('[DONE] [Time %.3f] [Data %.3f] [LossC %f] [LossSim %f]' % + (batch_time.average, data_time.average, + losses['cls'].average, losses['sim'].average)) + + if logger is not None: + logger.scalar_summary('train/loss_cls', losses['cls'].average, epoch) + logger.scalar_summary('train/loss_sim', losses['sim'].average, epoch) + logger.scalar_summary('train/batch_time', batch_time.average, epoch) diff --git a/training/unsup/simclr_CSI.py b/training/unsup/simclr_CSI.py new file mode 100644 index 0000000..d6d1901 --- /dev/null +++ b/training/unsup/simclr_CSI.py @@ -0,0 +1,114 @@ +import time + +import torch.optim + +import models.transform_layers as TL +from training.contrastive_loss import get_similarity_matrix, NT_xent +from utils.utils import AverageMeter, normalize + +device = torch.device(f"cuda" if torch.cuda.is_available() else "cpu") +hflip = TL.HorizontalFlipLayer().to(device) + + 
+def train(P, epoch, model, criterion, optimizer, scheduler, loader, logger=None, + simclr_aug=None, linear=None, linear_optim=None): + + assert simclr_aug is not None + assert P.sim_lambda == 1.0 # to avoid mistake + assert P.K_shift > 1 + + if logger is None: + log_ = print + else: + log_ = logger.log + + batch_time = AverageMeter() + data_time = AverageMeter() + + losses = dict() + losses['cls'] = AverageMeter() + losses['sim'] = AverageMeter() + losses['shift'] = AverageMeter() + + check = time.time() + for n, (images, labels) in enumerate(loader): + model.train() + count = n * P.n_gpus # number of trained samples + + data_time.update(time.time() - check) + check = time.time() + + ### SimCLR loss ### + if P.dataset != 'imagenet' and P.dataset != 'CNMC' and P.dataset != 'CNMC_grayscale': + batch_size = images.size(0) + images = images.to(device) + images1, images2 = hflip(images.repeat(2, 1, 1, 1)).chunk(2) # hflip + else: + batch_size = images[0].size(0) + images1, images2 = images[0].to(device), images[1].to(device) + labels = labels.to(device) + + images1 = torch.cat([P.shift_trans(images1, k) for k in range(P.K_shift)]) + images2 = torch.cat([P.shift_trans(images2, k) for k in range(P.K_shift)]) + + shift_labels = torch.cat([torch.ones_like(labels) * k for k in range(P.K_shift)], 0) # B -> 4B + shift_labels = shift_labels.repeat(2) + + images_pair = torch.cat([images1, images2], dim=0) # 8B + images_pair = simclr_aug(images_pair) # transform + + _, outputs_aux = model(images_pair, simclr=True, penultimate=True, shift=True) + + simclr = normalize(outputs_aux['simclr']) # normalize + sim_matrix = get_similarity_matrix(simclr, multi_gpu=P.multi_gpu) + loss_sim = NT_xent(sim_matrix, temperature=0.5) * P.sim_lambda + + loss_shift = criterion(outputs_aux['shift'], shift_labels) + + ### total loss ### + loss = loss_sim + loss_shift + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + scheduler.step(epoch - 1 + n / len(loader)) + lr = 
optimizer.param_groups[0]['lr'] + + batch_time.update(time.time() - check) + + ### Post-processing stuffs ### + simclr_norm = outputs_aux['simclr'].norm(dim=1).mean() + + penul_1 = outputs_aux['penultimate'][:batch_size] + penul_2 = outputs_aux['penultimate'][P.K_shift * batch_size: (P.K_shift + 1) * batch_size] + outputs_aux['penultimate'] = torch.cat([penul_1, penul_2]) # only use original rotation + + ### Linear evaluation ### + outputs_linear_eval = linear(outputs_aux['penultimate'].detach()) + loss_linear = criterion(outputs_linear_eval, labels.repeat(2)) + + linear_optim.zero_grad() + loss_linear.backward() + linear_optim.step() + + losses['cls'].update(0, batch_size) + losses['sim'].update(loss_sim.item(), batch_size) + losses['shift'].update(loss_shift.item(), batch_size) + + if count % 50 == 0: + log_('[Epoch %3d; %3d] [Time %.3f] [Data %.3f] [LR %.5f]\n' + '[LossC %f] [LossSim %f] [LossShift %f]' % + (epoch, count, batch_time.value, data_time.value, lr, + losses['cls'].value, losses['sim'].value, losses['shift'].value)) + + log_('[DONE] [Time %.3f] [Data %.3f] [LossC %f] [LossSim %f] [LossShift %f]' % + (batch_time.average, data_time.average, + losses['cls'].average, losses['sim'].average, losses['shift'].average)) + + if logger is not None: + logger.scalar_summary('train/loss_cls', losses['cls'].average, epoch) + logger.scalar_summary('train/loss_sim', losses['sim'].average, epoch) + logger.scalar_summary('train/loss_shift', losses['shift'].average, epoch) + logger.scalar_summary('train/batch_time', batch_time.average, epoch) + diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/utils/__pycache__/__init__.cpython-36.pyc b/utils/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..8e90297 Binary files /dev/null and b/utils/__pycache__/__init__.cpython-36.pyc differ diff --git a/utils/__pycache__/__init__.cpython-37.pyc b/utils/__pycache__/__init__.cpython-37.pyc new file mode 
100644 index 0000000..388a81c Binary files /dev/null and b/utils/__pycache__/__init__.cpython-37.pyc differ diff --git a/utils/__pycache__/__init__.cpython-38.pyc b/utils/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000..32d0276 Binary files /dev/null and b/utils/__pycache__/__init__.cpython-38.pyc differ diff --git a/utils/__pycache__/temperature_scaling.cpython-36.pyc b/utils/__pycache__/temperature_scaling.cpython-36.pyc new file mode 100644 index 0000000..e6560b4 Binary files /dev/null and b/utils/__pycache__/temperature_scaling.cpython-36.pyc differ diff --git a/utils/__pycache__/temperature_scaling.cpython-37.pyc b/utils/__pycache__/temperature_scaling.cpython-37.pyc new file mode 100644 index 0000000..072eb06 Binary files /dev/null and b/utils/__pycache__/temperature_scaling.cpython-37.pyc differ diff --git a/utils/__pycache__/utils.cpython-36.pyc b/utils/__pycache__/utils.cpython-36.pyc new file mode 100644 index 0000000..af4f948 Binary files /dev/null and b/utils/__pycache__/utils.cpython-36.pyc differ diff --git a/utils/__pycache__/utils.cpython-37.pyc b/utils/__pycache__/utils.cpython-37.pyc new file mode 100644 index 0000000..17c0653 Binary files /dev/null and b/utils/__pycache__/utils.cpython-37.pyc differ diff --git a/utils/__pycache__/utils.cpython-38.pyc b/utils/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000..69c5b92 Binary files /dev/null and b/utils/__pycache__/utils.cpython-38.pyc differ diff --git a/utils/temperature_scaling.py b/utils/temperature_scaling.py new file mode 100644 index 0000000..13fa93a --- /dev/null +++ b/utils/temperature_scaling.py @@ -0,0 +1,120 @@ +import torch +from torch import nn, optim +from torch.nn import functional as F + + +class ModelWithTemperature(nn.Module): + """ + A thin decorator, which wraps a model with temperature scaling + model (nn.Module): + A classification neural network + NB: Output of the neural network should be the classification logits, + NOT the softmax 
(or log softmax)! + """ + def __init__(self, model): + super(ModelWithTemperature, self).__init__() + self.model = model + self.temperature = nn.Parameter(torch.ones(1) * 0.5) + + def forward(self, input): + logits = self.model(input) + return self.temperature_scale(logits) + + def temperature_scale(self, logits): + """ + Perform temperature scaling on logits + """ + # Expand temperature to match the size of logits + temperature = self.temperature.unsqueeze(1).expand(logits.size(0), logits.size(1)) + return logits / temperature + + # This function probably should live outside of this class, but whatever + def set_temperature(self, valid_loader): + """ + Tune the tempearature of the model (using the validation set). + We're going to set it to optimize NLL. + valid_loader (DataLoader): validation set loader + """ + self.cuda() + nll_criterion = nn.CrossEntropyLoss().cuda() + ece_criterion = _ECELoss().cuda() + + # First: collect all the logits and labels for the validation set + logits_list = [] + labels_list = [] + with torch.no_grad(): + for input, label in valid_loader: + input = input.cuda() + logits = self.model(input) + logits_list.append(logits) + labels_list.append(label) + logits = torch.cat(logits_list).cuda() + labels = torch.cat(labels_list).cuda() + + # Calculate NLL and ECE before temperature scaling + before_temperature_nll = nll_criterion(logits, labels).item() + before_temperature_ece = ece_criterion(logits, labels).item() + print('Before temperature - NLL: %.3f, ECE: %.3f' % (before_temperature_nll, before_temperature_ece)) + + # Next: optimize the temperature w.r.t. 
NLL + optimizer = optim.LBFGS([self.temperature], lr=0.0001, max_iter=50000) + + def eval(): + loss = nll_criterion(self.temperature_scale(logits), labels) + loss.backward() + return loss + optimizer.step(eval) + + # Calculate NLL and ECE after temperature scaling + after_temperature_nll = nll_criterion(self.temperature_scale(logits), labels).item() + after_temperature_ece = ece_criterion(self.temperature_scale(logits), labels).item() + print('Optimal temperature: %.3f' % self.temperature.item()) + print('After temperature - NLL: %.3f, ECE: %.3f' % (after_temperature_nll, after_temperature_ece)) + + return self + + +class _ECELoss(nn.Module): + """ + Calculates the Expected Calibration Error of a model. + (This isn't necessary for temperature scaling, just a cool metric). + + The input to this loss is the logits of a model, NOT the softmax scores. + + This divides the confidence outputs into equally-sized interval bins. + In each bin, we compute the confidence gap: + + bin_gap = | avg_confidence_in_bin - accuracy_in_bin | + + We then return a weighted average of the gaps, based on the number + of samples in each bin + + See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht. + "Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI. + 2015. 
+ """ + def __init__(self, n_bins=15): + """ + n_bins (int): number of confidence interval bins + """ + super(_ECELoss, self).__init__() + bin_boundaries = torch.linspace(0, 1, n_bins + 1) + self.bin_lowers = bin_boundaries[:-1] + self.bin_uppers = bin_boundaries[1:] + + def forward(self, logits, labels): + softmaxes = F.softmax(logits, dim=1) + confidences, predictions = torch.max(softmaxes, 1) + accuracies = predictions.eq(labels) + + ece = torch.zeros(1, device=logits.device) + for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers): + # Calculated |confidence - accuracy| in each bin + in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item()) + prop_in_bin = in_bin.float().mean() + if prop_in_bin.item() > 0: + accuracy_in_bin = accuracies[in_bin].float().mean() + avg_confidence_in_bin = confidences[in_bin].mean() + ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin + + return ece \ No newline at end of file diff --git a/utils/utils.py b/utils/utils.py new file mode 100644 index 0000000..0ff4e36 --- /dev/null +++ b/utils/utils.py @@ -0,0 +1,205 @@ +import os +import pickle +import random +import shutil +import sys +from datetime import datetime + +import numpy as np +import torch +from matplotlib import pyplot as plt +from tensorboardX import SummaryWriter + + +class Logger(object): + """Reference: https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514""" + + def __init__(self, fn, ask=True, local_rank=0): + self.local_rank = local_rank + if self.local_rank == 0: + if not os.path.exists("./logs/"): + os.mkdir("./logs/") + + logdir = self._make_dir(fn) + if not os.path.exists(logdir): + os.mkdir(logdir) + + if len(os.listdir(logdir)) != 0 and ask: + ans = input("log_dir is not empty. All data inside log_dir will be deleted. " + "Will you proceed [y/N]? 
") + if ans in ['y', 'Y']: + shutil.rmtree(logdir) + else: + exit(1) + + self.set_dir(logdir) + + def _make_dir(self, fn): + today = datetime.today().strftime("%y%m%d") + logdir = 'logs/' + fn + return logdir + + def set_dir(self, logdir, log_fn='log.txt'): + self.logdir = logdir + if not os.path.exists(logdir): + os.mkdir(logdir) + self.writer = SummaryWriter(logdir) + self.log_file = open(os.path.join(logdir, log_fn), 'a') + + def log(self, string): + if self.local_rank == 0: + self.log_file.write('[%s] %s' % (datetime.now(), string) + '\n') + self.log_file.flush() + + print('[%s] %s' % (datetime.now(), string)) + sys.stdout.flush() + + def log_dirname(self, string): + if self.local_rank == 0: + self.log_file.write('%s (%s)' % (string, self.logdir) + '\n') + self.log_file.flush() + + print('%s (%s)' % (string, self.logdir)) + sys.stdout.flush() + + def scalar_summary(self, tag, value, step): + """Log a scalar variable.""" + if self.local_rank == 0: + self.writer.add_scalar(tag, value, step) + + def image_summary(self, tag, images, step): + """Log a list of images.""" + if self.local_rank == 0: + self.writer.add_image(tag, images, step) + + def histo_summary(self, tag, values, step): + """Log a histogram of the tensor of values.""" + if self.local_rank == 0: + self.writer.add_histogram(tag, values, step, bins='auto') + + +class AverageMeter(object): + """Computes and stores the average and current value""" + + def __init__(self): + self.value = 0 + self.average = 0 + self.sum = 0 + self.count = 0 + + def reset(self): + self.value = 0 + self.average = 0 + self.sum = 0 + self.count = 0 + + def update(self, value, n=1): + self.value = value + self.sum += value * n + self.count += n + self.average = self.sum / self.count + + +def load_checkpoint(logdir, mode='last'): + if mode == 'last': + model_path = os.path.join(logdir, 'last.model') + optim_path = os.path.join(logdir, 'last.optim') + config_path = os.path.join(logdir, 'last.config') + elif mode == 'best': + 
model_path = os.path.join(logdir, 'best.model') + optim_path = os.path.join(logdir, 'best.optim') + config_path = os.path.join(logdir, 'best.config') + + else: + raise NotImplementedError() + + print("=> Loading checkpoint from '{}'".format(logdir)) + if os.path.exists(model_path): + model_state = torch.load(model_path) + optim_state = torch.load(optim_path) + with open(config_path, 'rb') as handle: + cfg = pickle.load(handle) + else: + return None, None, None + + return model_state, optim_state, cfg + + +def save_checkpoint(epoch, model_state, optim_state, logdir): + last_model = os.path.join(logdir, 'last.model') + last_optim = os.path.join(logdir, 'last.optim') + last_config = os.path.join(logdir, 'last.config') + + opt = { + 'epoch': epoch, + } + torch.save(model_state, last_model) + torch.save(optim_state, last_optim) + with open(last_config, 'wb') as handle: + pickle.dump(opt, handle, protocol=pickle.HIGHEST_PROTOCOL) + + +def load_linear_checkpoint(logdir, mode='last'): + if mode == 'last': + linear_optim_path = os.path.join(logdir, 'last.linear_optim') + elif mode == 'best': + linear_optim_path = os.path.join(logdir, 'best.linear_optim') + else: + raise NotImplementedError() + + print("=> Loading linear optimizer checkpoint from '{}'".format(logdir)) + if os.path.exists(linear_optim_path): + linear_optim_state = torch.load(linear_optim_path) + return linear_optim_state + else: + return None + + +def save_linear_checkpoint(linear_optim_state, logdir): + last_linear_optim = os.path.join(logdir, 'last.linear_optim') + torch.save(linear_optim_state, last_linear_optim) + + +def set_random_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + + +def normalize(x, dim=1, eps=1e-8): + return x / (x.norm(dim=dim, keepdim=True) + eps) + + +def make_model_diagrams(probs, labels, n_bins=10): + """ + outputs - a torch tensor (size n x num_classes) with the outputs from the final linear layer + - NOT the 
softmaxes + labels - a torch tensor (size n) with the labels + """ + confidences, predictions = probs.max(1) + accuracies = torch.eq(predictions, labels) + f, rel_ax = plt.subplots(1, 2, figsize=(4, 2.5)) + + # Reliability diagram + bins = torch.linspace(0, 1, n_bins + 1) + bins[-1] = 1.0001 + width = bins[1] - bins[0] + bin_indices = [confidences.ge(bin_lower) * confidences.lt(bin_upper) for bin_lower, bin_upper in + zip(bins[:-1], bins[1:])] + bin_corrects = [torch.mean(accuracies[bin_index]) for bin_index in bin_indices] + bin_scores = [torch.mean(confidences[bin_index]) for bin_index in bin_indices] + + confs = rel_ax.bar(bins[:-1], bin_corrects.numpy(), width=width) + gaps = rel_ax.bar(bins[:-1], (bin_scores - bin_corrects).numpy(), bottom=bin_corrects.numpy(), color=[1, 0.7, 0.7], + alpha=0.5, width=width, hatch='//', edgecolor='r') + rel_ax.plot([0, 1], [0, 1], '--', color='gray') + rel_ax.legend([confs, gaps], ['Outputs', 'Gap'], loc='best', fontsize='small') + + # Clean up + rel_ax.set_ylabel('Accuracy') + rel_ax.set_xlabel('Confidence') + f.tight_layout() + return f + +