/* info2Praktikum-NeuronalesNetz/neuralNetworkTests.c */

#include "neuralNetwork.h"
#include "unity.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
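
/* Writes a model file in the layout these tests assume:
 *   - the header string "__info2_neural_network_file_format__" (no '\n', no '\0'),
 *   - inputDimension and outputDimension of the first layer as two ints,
 *   - per layer: the weights (outputDim x inputDim MatrixType values) and the
 *     biases (outputDim x 1 MatrixType values), followed by the next layer's
 *     outputDimension as an int, or a 0 after the last layer.
 */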
static void prepareNeuralNetworkFile(const char *path, const NeuralNetwork nn) {
  FILE *f = fopen(path, "wb");
  if (f == NULL)
    return;
  /* 1) Header: exactly this string, without '\n' or a terminating '\0' */
  const char header[] = "__info2_neural_network_file_format__";
  fwrite(header, sizeof(char), strlen(header), f);
  /* If there are no layers, write no dimension pair (loadModel will then
     return 0 layers when reading). Alternatively we can terminate early
     with a 0 int; both are fine. */
  if (nn.numberOfLayers == 0) {
    /* optional: write a 0 as the next outputDimension (not strictly needed) */
    int zero = 0;
    fwrite(&zero, sizeof(int), 1, f);
    fclose(f);
    return;
  }
  /* 2) For the first layer, write inputDimension and outputDimension */
  /* inputDimension == weights.cols, outputDimension == weights.rows */
  int inputDim = (int)nn.layers[0].weights.cols;
  int outputDim = (int)nn.layers[0].weights.rows;
  fwrite(&inputDim, sizeof(int), 1, f);
  fwrite(&outputDim, sizeof(int), 1, f);
  /* 3) For each layer in order: weights (output x input), then biases
     (output x 1). Between layers only the next outputDimension (int) is
     written. */
  for (int i = 0; i < nn.numberOfLayers; i++) {
    Layer layer = nn.layers[i];
    int wrows = (int)layer.weights.rows;
    int wcols = (int)layer.weights.cols;
    int wcount = wrows * wcols;
    int bcount =
        (int)(layer.biases.rows * layer.biases.cols); /* normally rows * 1 */
    /* weights (binary MatrixType values) */
    if (wcount > 0 && layer.weights.buffer != NULL) {
      fwrite(layer.weights.buffer, sizeof(MatrixType), (size_t)wcount, f);
    }
    /* biases (binary MatrixType values) */
    if (bcount > 0 && layer.biases.buffer != NULL) {
      fwrite(layer.biases.buffer, sizeof(MatrixType), (size_t)bcount, f);
    }
    /* For the next layer, if there is one, write its outputDimension */
    if (i + 1 < nn.numberOfLayers) {
      int nextOutput = (int)nn.layers[i + 1].weights.rows;
      fwrite(&nextOutput, sizeof(int), 1, f);
    } else {
      /* Last layer: signal the end by writing a 0. loadModel then reads
         outputDimension = 0 and leaves its loop. */
      int zero = 0;
      fwrite(&zero, sizeof(int), 1, f);
    }
  }
  fclose(f);
}
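
/* Sketch of an additional test, assuming that loadModel returns a network
 * with 0 layers when the file contains no dimension pair (see the comment in
 * prepareNeuralNetworkFile); the file path below is the same throwaway name
 * used by the other tests. */
void test_loadModelReturnsEmptyNetworkForZeroLayerFile(void) {
  const char *path = "some__nn_test_file.info2";
  NeuralNetwork emptyNet = {.layers = NULL, .numberOfLayers = 0};
  NeuralNetwork netUnderTest;
  prepareNeuralNetworkFile(path, emptyNet);
  netUnderTest = loadModel(path);
  remove(path);
  TEST_ASSERT_EQUAL_INT(0, netUnderTest.numberOfLayers);
}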

void test_loadModelReturnsCorrectNumberOfLayers(void) {
  const char *path = "some__nn_test_file.info2";
  MatrixType buffer1[] = {1, 2, 3, 4, 5, 6};
  MatrixType buffer2[] = {1, 2, 3, 4, 5, 6};
  Matrix weights1 = {.buffer = buffer1, .rows = 3, .cols = 2};
  Matrix weights2 = {.buffer = buffer2, .rows = 2, .cols = 3};
  MatrixType buffer3[] = {1, 2, 3};
  MatrixType buffer4[] = {1, 2};
  Matrix biases1 = {.buffer = buffer3, .rows = 3, .cols = 1};
  Matrix biases2 = {.buffer = buffer4, .rows = 2, .cols = 1};
  Layer layers[] = {{.weights = weights1, .biases = biases1},
                    {.weights = weights2, .biases = biases2}};
  NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 2};
  NeuralNetwork netUnderTest;
  prepareNeuralNetworkFile(path, expectedNet);
  netUnderTest = loadModel(path);
  remove(path);
  TEST_ASSERT_EQUAL_INT(expectedNet.numberOfLayers,
                        netUnderTest.numberOfLayers);
  clearModel(&netUnderTest);
}

void test_loadModelReturnsCorrectWeightDimensions(void) {
  const char *path = "some__nn_test_file.info2";
  MatrixType weightBuffer[] = {1, 2, 3, 4, 5, 6};
  Matrix weights = {.buffer = weightBuffer, .rows = 3, .cols = 2};
  MatrixType biasBuffer[] = {7, 8, 9};
  Matrix biases = {.buffer = biasBuffer, .rows = 3, .cols = 1};
  Layer layers[] = {{.weights = weights, .biases = biases}};
  NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 1};
  NeuralNetwork netUnderTest;
  prepareNeuralNetworkFile(path, expectedNet);
  netUnderTest = loadModel(path);
  remove(path);
  TEST_ASSERT_TRUE(netUnderTest.numberOfLayers > 0);
  TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.rows,
                        netUnderTest.layers[0].weights.rows);
  TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.cols,
                        netUnderTest.layers[0].weights.cols);
  clearModel(&netUnderTest);
}

void test_loadModelReturnsCorrectBiasDimensions(void) {
  const char *path = "some__nn_test_file.info2";
  MatrixType weightBuffer[] = {1, 2, 3, 4, 5, 6};
  Matrix weights = {.buffer = weightBuffer, .rows = 3, .cols = 2};
  MatrixType biasBuffer[] = {7, 8, 9};
  Matrix biases = {.buffer = biasBuffer, .rows = 3, .cols = 1};
  Layer layers[] = {{.weights = weights, .biases = biases}};
  NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 1};
  NeuralNetwork netUnderTest;
  prepareNeuralNetworkFile(path, expectedNet);
  netUnderTest = loadModel(path);
  remove(path);
  TEST_ASSERT_TRUE(netUnderTest.numberOfLayers > 0);
  TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].biases.rows,
                        netUnderTest.layers[0].biases.rows);
  TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].biases.cols,
                        netUnderTest.layers[0].biases.cols);
  clearModel(&netUnderTest);
}

void test_loadModelReturnsCorrectWeights(void) {
  const char *path = "some__nn_test_file.info2";
  MatrixType weightBuffer[] = {1, 2, 3, 4, 5, 6};
  Matrix weights = {.buffer = weightBuffer, .rows = 3, .cols = 2};
  MatrixType biasBuffer[] = {7, 8, 9};
  Matrix biases = {.buffer = biasBuffer, .rows = 3, .cols = 1};
  Layer layers[] = {{.weights = weights, .biases = biases}};
  NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 1};
  NeuralNetwork netUnderTest;
  prepareNeuralNetworkFile(path, expectedNet);
  netUnderTest = loadModel(path);
  remove(path);
  TEST_ASSERT_TRUE(netUnderTest.numberOfLayers > 0);
  TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.rows,
                        netUnderTest.layers[0].weights.rows);
  TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.cols,
                        netUnderTest.layers[0].weights.cols);
  int n =
      netUnderTest.layers[0].weights.rows * netUnderTest.layers[0].weights.cols;
  TEST_ASSERT_EQUAL_INT_ARRAY(expectedNet.layers[0].weights.buffer,
                              netUnderTest.layers[0].weights.buffer, n);
  clearModel(&netUnderTest);
}

void test_loadModelReturnsCorrectBiases(void) {
  const char *path = "some__nn_test_file.info2";
  MatrixType weightBuffer[] = {1, 2, 3, 4, 5, 6};
  Matrix weights = {.buffer = weightBuffer, .rows = 3, .cols = 2};
  MatrixType biasBuffer[] = {7, 8, 9};
  Matrix biases = {.buffer = biasBuffer, .rows = 3, .cols = 1};
  Layer layers[] = {{.weights = weights, .biases = biases}};
  NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 1};
  NeuralNetwork netUnderTest;
  prepareNeuralNetworkFile(path, expectedNet);
  netUnderTest = loadModel(path);
  remove(path);
  TEST_ASSERT_TRUE(netUnderTest.numberOfLayers > 0);
  TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.rows,
                        netUnderTest.layers[0].weights.rows);
  TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.cols,
                        netUnderTest.layers[0].weights.cols);
  int n =
      netUnderTest.layers[0].biases.rows * netUnderTest.layers[0].biases.cols;
  TEST_ASSERT_EQUAL_INT_ARRAY(expectedNet.layers[0].biases.buffer,
                              netUnderTest.layers[0].biases.buffer, n);
  clearModel(&netUnderTest);
}

void test_loadModelFailsOnWrongFileTag(void) {
  const char *path = "some_nn_test_file.info2";
  NeuralNetwork netUnderTest;
  FILE *file = fopen(path, "wb");
  if (file != NULL) {
    /* Deliberately write a tag without the surrounding double underscores. */
    const char *fileTag = "info2_neural_network_file_format";
    fwrite(fileTag, sizeof(char), strlen(fileTag), file);
    fclose(file);
  }
  netUnderTest = loadModel(path);
  remove(path);
  TEST_ASSERT_NULL(netUnderTest.layers);
  TEST_ASSERT_EQUAL_INT(0, netUnderTest.numberOfLayers);
}

void test_clearModelSetsMembersToNull(void) {
  const char *path = "some__nn_test_file.info2";
  MatrixType weightBuffer[] = {1, 2, 3, 4, 5, 6};
  Matrix weights = {.buffer = weightBuffer, .rows = 3, .cols = 2};
  MatrixType biasBuffer[] = {7, 8, 9};
  Matrix biases = {.buffer = biasBuffer, .rows = 3, .cols = 1};
  Layer layers[] = {{.weights = weights, .biases = biases}};
  NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 1};
  NeuralNetwork netUnderTest;
  prepareNeuralNetworkFile(path, expectedNet);
  netUnderTest = loadModel(path);
  remove(path);
  TEST_ASSERT_NOT_NULL(netUnderTest.layers);
  TEST_ASSERT_TRUE(netUnderTest.numberOfLayers > 0);
  clearModel(&netUnderTest);
  TEST_ASSERT_NULL(netUnderTest.layers);
  TEST_ASSERT_EQUAL_INT(0, netUnderTest.numberOfLayers);
}
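
/* Stand-in activation for the predict test: element-wise absolute value. */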
static void someActivation(Matrix *matrix) {
  for (int i = 0; i < matrix->rows * matrix->cols; i++) {
    matrix->buffer[i] = fabs(matrix->buffer[i]);
  }
}

void test_predictReturnsCorrectLabels(void) {
  const unsigned char expectedLabels[] = {4, 2};
  GrayScalePixelType imageBuffer1[] = {10, 30, 25, 17};
  GrayScalePixelType imageBuffer2[] = {20, 40, 10, 128};
  GrayScaleImage inputImages[] = {
      {.buffer = imageBuffer1, .width = 2, .height = 2},
      {.buffer = imageBuffer2, .width = 2, .height = 2}};
  MatrixType weightsBuffer1[] = {1, -2, 3, -4, 5, -6, 7, -8};
  MatrixType weightsBuffer2[] = {-9, 10, 11, 12, 13, 14};
  MatrixType weightsBuffer3[] = {-15, 16, 17, 18, -19, 20, 21, 22,
                                 23, -24, 25, 26, 27, -28, -29};
  Matrix weights1 = {.buffer = weightsBuffer1, .rows = 2, .cols = 4};
  Matrix weights2 = {.buffer = weightsBuffer2, .rows = 3, .cols = 2};
  Matrix weights3 = {.buffer = weightsBuffer3, .rows = 5, .cols = 3};
  MatrixType biasBuffer1[] = {200, 0};
  MatrixType biasBuffer2[] = {0, -100, 0};
  MatrixType biasBuffer3[] = {0, -1000, 0, 2000, 0};
  Matrix biases1 = {.buffer = biasBuffer1, .rows = 2, .cols = 1};
  Matrix biases2 = {.buffer = biasBuffer2, .rows = 3, .cols = 1};
  Matrix biases3 = {.buffer = biasBuffer3, .rows = 5, .cols = 1};
  Layer layers[] = {
      {.weights = weights1, .biases = biases1, .activation = someActivation},
      {.weights = weights2, .biases = biases2, .activation = someActivation},
      {.weights = weights3, .biases = biases3, .activation = someActivation}};
  NeuralNetwork netUnderTest = {.layers = layers, .numberOfLayers = 3};
  unsigned char *predictedLabels = predict(netUnderTest, inputImages, 2);
  TEST_ASSERT_NOT_NULL(predictedLabels);
  int n = (int)(sizeof(expectedLabels) / sizeof(expectedLabels[0]));
  TEST_ASSERT_EQUAL_UINT8_ARRAY(expectedLabels, predictedLabels, n);
  free(predictedLabels);
}

void setUp(void) {
  // If necessary, per-test setup work can be done here
}

void tearDown(void) {
  // Cleanup work after each test can be done here
}

int main(void) {
  UNITY_BEGIN();
  printf("\n============================\nNeural network "
         "tests\n============================\n");
  RUN_TEST(test_loadModelReturnsCorrectNumberOfLayers);
  RUN_TEST(test_loadModelReturnsCorrectWeightDimensions);
  RUN_TEST(test_loadModelReturnsCorrectBiasDimensions);
  RUN_TEST(test_loadModelReturnsCorrectWeights);
  RUN_TEST(test_loadModelReturnsCorrectBiases);
  RUN_TEST(test_loadModelFailsOnWrongFileTag);
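  /* Runs the zero-layer sketch test above; this relies on the assumed
     loadModel behavior noted at that test. */
  RUN_TEST(test_loadModelReturnsEmptyNetworkForZeroLayerFile);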
  RUN_TEST(test_clearModelSetsMembersToNull);
  RUN_TEST(test_predictReturnsCorrectLabels);
  return UNITY_END();
}