neuralNetworkTests

This commit is contained in:
Kristin 2025-11-23 16:41:57 +01:00
parent 86a9d16c4f
commit 56d59b1b50
3 changed files with 511 additions and 451 deletions

View File

@ -218,4 +218,31 @@ Matrix add(const Matrix matrix1, const Matrix matrix2) {
return error;
}
}
Matrix multiply(const Matrix matrix1, const Matrix matrix2) { return matrix1; }
Matrix multiply(const Matrix matrix1, const Matrix matrix2) {
// Spalten1 müssen gleich zeilen2 sein! dann multiplizieren
if (matrix1.cols == matrix2.rows) {
Matrix multMatrix = createMatrix(matrix1.rows, matrix2.cols);
// durch neue matrix iterieren
for (int r = 0; r < matrix1.rows; r++) {
for (int c = 0; c < matrix2.cols; c++) {
MatrixType sum = 0.0;
// skalarprodukte berechnen, k damit die ganze zeile mal die ganze
// spalte genommen wird quasi
for (int k = 0; k < matrix1.cols; k++) {
// sum+=
// matrix1.buffer[r*matrix1.cols+k]*matrix2.buffer[k*matrix2.cols+c];
sum += getMatrixAt(matrix1, r, k) * getMatrixAt(matrix2, k, c);
}
// Ergebnisse in neue matrix speichern
setMatrixAt(sum, multMatrix, r, c);
}
}
return multMatrix;
}
// sonst fehler, kein multiply möglich
else {
Matrix errorMatrix = {0, 0, NULL};
return errorMatrix;
}
}

View File

@ -1,35 +1,29 @@
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include "neuralNetwork.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define BUFFER_SIZE 100
#define FILE_HEADER_STRING "__info2_neural_network_file_format__"
static void softmax(Matrix *matrix)
{
if(matrix->cols > 0)
{
static void softmax(Matrix *matrix) {
if (matrix->cols > 0) {
double *colSums = (double *)calloc(matrix->cols, sizeof(double));
if(colSums != NULL)
{
for(int colIdx = 0; colIdx < matrix->cols; colIdx++)
{
for(int rowIdx = 0; rowIdx < matrix->rows; rowIdx++)
{
if (colSums != NULL) {
for (int colIdx = 0; colIdx < matrix->cols; colIdx++) {
for (int rowIdx = 0; rowIdx < matrix->rows; rowIdx++) {
MatrixType expValue = exp(getMatrixAt(*matrix, rowIdx, colIdx));
setMatrixAt(expValue, *matrix, rowIdx, colIdx);
colSums[colIdx] += expValue;
}
}
for(int colIdx = 0; colIdx < matrix->cols; colIdx++)
{
for(int rowIdx = 0; rowIdx < matrix->rows; rowIdx++)
{
MatrixType normalizedValue = getMatrixAt(*matrix, rowIdx, colIdx) / colSums[colIdx];
for (int colIdx = 0; colIdx < matrix->cols; colIdx++) {
for (int rowIdx = 0; rowIdx < matrix->rows; rowIdx++) {
MatrixType normalizedValue =
getMatrixAt(*matrix, rowIdx, colIdx) / colSums[colIdx];
setMatrixAt(normalizedValue, *matrix, rowIdx, colIdx);
}
}
@ -38,54 +32,49 @@ static void softmax(Matrix *matrix)
}
}
// Applies ReLU in place: every negative entry of the matrix becomes 0,
// non-negative entries are kept unchanged.
static void relu(Matrix *matrix) {
  for (int i = 0; i < matrix->rows * matrix->cols; i++) {
    matrix->buffer[i] = matrix->buffer[i] >= 0 ? matrix->buffer[i] : 0;
  }
}
// Reads the leading bytes of 'file' and compares them against the expected
// file-format tag. Returns 1 when the header matches, 0 otherwise; on success
// the file position is left just past the header.
static int checkFileHeader(FILE *file) {
  int isValid = 0;
  // size_t matches strlen's return type and fread's element count/result,
  // avoiding signed/unsigned comparison.
  size_t fileHeaderLen = strlen(FILE_HEADER_STRING);
  char buffer[BUFFER_SIZE] = {0};
  // Never read more than BUFFER_SIZE-1 bytes: the zero-initialized last byte
  // keeps the buffer NUL-terminated for strcmp.
  if (BUFFER_SIZE - 1 < fileHeaderLen)
    fileHeaderLen = BUFFER_SIZE - 1;
  if (fread(buffer, sizeof(char), fileHeaderLen, file) == fileHeaderLen)
    isValid = strcmp(buffer, FILE_HEADER_STRING) == 0;
  return isValid;
}
// Reads one dimension (stored as a binary int) from 'file'.
// Returns 0 on a failed read or a corrupt negative value; callers treat a
// 0 dimension as "no more layers".
static unsigned int readDimension(FILE *file) {
  int dimension = 0;
  if (fread(&dimension, sizeof(int), 1, file) != 1)
    dimension = 0;
  // Guard against corrupt files: a negative int would wrap to a huge
  // unsigned value and trigger an enormous allocation downstream.
  if (dimension < 0)
    dimension = 0;
  return dimension;
}
// Reads rows*cols MatrixType values from 'file' into a freshly allocated
// matrix. On a short read the matrix is cleared, so the caller receives a
// matrix with a NULL buffer on failure. The caller owns the returned matrix.
static Matrix readMatrix(FILE *file, unsigned int rows, unsigned int cols) {
  Matrix matrix = createMatrix(rows, cols);
  if (matrix.buffer != NULL) {
    // Compute the element count in size_t so large dimensions cannot
    // overflow unsigned int arithmetic.
    size_t count = (size_t)rows * cols;
    if (fread(matrix.buffer, sizeof(MatrixType), count, file) != count)
      clearMatrix(&matrix);
  }
  return matrix;
}
static Layer readLayer(FILE *file, unsigned int inputDimension, unsigned int outputDimension)
{
static Layer readLayer(FILE *file, unsigned int inputDimension,
unsigned int outputDimension) {
Layer layer;
layer.weights = readMatrix(file, outputDimension, inputDimension);
layer.biases = readMatrix(file, outputDimension, 1);
@ -93,62 +82,54 @@ static Layer readLayer(FILE *file, unsigned int inputDimension, unsigned int out
return layer;
}
// A layer counts as empty when either its weights or its biases have a zero
// dimension or no backing buffer. Used by loadModel to detect read failures.
static int isEmptyLayer(const Layer layer) {
  return layer.biases.cols == 0 || layer.biases.rows == 0 ||
         layer.biases.buffer == NULL || layer.weights.rows == 0 ||
         layer.weights.cols == 0 || layer.weights.buffer == NULL;
}
// Releases the matrices owned by 'layer' and resets its activation pointer.
// Safe to call with NULL.
static void clearLayer(Layer *layer) {
  if (layer != NULL) {
    clearMatrix(&layer->weights);
    clearMatrix(&layer->biases);
    layer->activation = NULL;
  }
}
// Assigns relu to every hidden layer and softmax to the final (output)
// layer. A model with zero layers is left untouched.
static void assignActivations(NeuralNetwork model) {
  for (int i = 0; i < (int)model.numberOfLayers - 1; i++) {
    model.layers[i].activation = relu;
  }
  if (model.numberOfLayers > 0)
    model.layers[model.numberOfLayers - 1].activation = softmax;
}
NeuralNetwork loadModel(const char *path)
{
NeuralNetwork loadModel(const char *path) {
NeuralNetwork model = {NULL, 0};
FILE *file = fopen(path, "rb");
if(file != NULL)
{
if(checkFileHeader(file))
{
if (file != NULL) {
if (checkFileHeader(file)) {
unsigned int inputDimension = readDimension(file);
unsigned int outputDimension = readDimension(file);
while(inputDimension > 0 && outputDimension > 0)
{
while (inputDimension > 0 && outputDimension > 0) {
Layer layer = readLayer(file, inputDimension, outputDimension);
Layer *layerBuffer = NULL;
if(isEmptyLayer(layer))
{
if (isEmptyLayer(layer)) {
clearLayer(&layer);
clearModel(&model);
break;
}
layerBuffer = (Layer *)realloc(model.layers, (model.numberOfLayers + 1) * sizeof(Layer));
layerBuffer = (Layer *)realloc(
model.layers, (model.numberOfLayers + 1) * sizeof(Layer));
if(layerBuffer != NULL)
if (layerBuffer != NULL)
model.layers = layerBuffer;
else
{
else {
clearModel(&model);
break;
}
@ -168,20 +149,16 @@ NeuralNetwork loadModel(const char *path)
return model;
}
static Matrix imageBatchToMatrixOfImageVectors(const GrayScaleImage images[], unsigned int count)
{
Matrix matrix = {NULL, 0, 0};
static Matrix imageBatchToMatrixOfImageVectors(const GrayScaleImage images[],
unsigned int count) {
Matrix matrix = {0, 0, NULL}; // falsch herum
if(count > 0 && images != NULL)
{
if (count > 0 && images != NULL) {
matrix = createMatrix(images[0].height * images[0].width, count);
if(matrix.buffer != NULL)
{
for(int i = 0; i < count; i++)
{
for(int j = 0; j < images[i].width * images[i].height; j++)
{
if (matrix.buffer != NULL) {
for (int i = 0; i < count; i++) {
for (int j = 0; j < images[i].width * images[i].height; j++) {
setMatrixAt((MatrixType)images[i].buffer[j], matrix, j, i);
}
}
@ -191,14 +168,11 @@ static Matrix imageBatchToMatrixOfImageVectors(const GrayScaleImage images[], un
return matrix;
}
static Matrix forward(const NeuralNetwork model, Matrix inputBatch)
{
static Matrix forward(const NeuralNetwork model, Matrix inputBatch) {
Matrix result = inputBatch;
if(result.buffer != NULL)
{
for(int i = 0; i < model.numberOfLayers; i++)
{
if (result.buffer != NULL) {
for (int i = 0; i < model.numberOfLayers; i++) {
Matrix biasResult;
Matrix weightResult;
@ -207,7 +181,7 @@ static Matrix forward(const NeuralNetwork model, Matrix inputBatch)
biasResult = add(model.layers[i].biases, weightResult);
clearMatrix(&weightResult);
if(model.layers[i].activation != NULL)
if (model.layers[i].activation != NULL)
model.layers[i].activation(&biasResult);
result = biasResult;
}
@ -216,23 +190,19 @@ static Matrix forward(const NeuralNetwork model, Matrix inputBatch)
return result;
}
unsigned char *argmax(const Matrix matrix)
{
unsigned char *argmax(const Matrix matrix) {
unsigned char *maxIdx = NULL;
if(matrix.rows > 0 && matrix.cols > 0)
{
if (matrix.rows > 0 && matrix.cols > 0) {
maxIdx = (unsigned char *)malloc(sizeof(unsigned char) * matrix.cols);
if(maxIdx != NULL)
{
for(int colIdx = 0; colIdx < matrix.cols; colIdx++)
{
if (maxIdx != NULL) {
for (int colIdx = 0; colIdx < matrix.cols; colIdx++) {
maxIdx[colIdx] = 0;
for(int rowIdx = 1; rowIdx < matrix.rows; rowIdx++)
{
if(getMatrixAt(matrix, rowIdx, colIdx) > getMatrixAt(matrix, maxIdx[colIdx], colIdx))
for (int rowIdx = 1; rowIdx < matrix.rows; rowIdx++) {
if (getMatrixAt(matrix, rowIdx, colIdx) >
getMatrixAt(matrix, maxIdx[colIdx], colIdx))
maxIdx[colIdx] = rowIdx;
}
}
@ -242,8 +212,8 @@ unsigned char *argmax(const Matrix matrix)
return maxIdx;
}
unsigned char *predict(const NeuralNetwork model, const GrayScaleImage images[], unsigned int numberOfImages)
{
unsigned char *predict(const NeuralNetwork model, const GrayScaleImage images[],
unsigned int numberOfImages) {
Matrix inputBatch = imageBatchToMatrixOfImageVectors(images, numberOfImages);
Matrix outputBatch = forward(model, inputBatch);
@ -254,12 +224,9 @@ unsigned char *predict(const NeuralNetwork model, const GrayScaleImage images[],
return result;
}
void clearModel(NeuralNetwork *model)
{
if(model != NULL)
{
for(int i = 0; i < model->numberOfLayers; i++)
{
void clearModel(NeuralNetwork *model) {
if (model != NULL) {
for (int i = 0; i < model->numberOfLayers; i++) {
clearLayer(&model->layers[i]);
}
model->layers = NULL;

View File

@ -1,30 +1,89 @@
#include "neuralNetwork.h"
#include "unity.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "unity.h"
#include "neuralNetwork.h"
// Test fixture: writes 'nn' to 'path' in the binary format loadModel expects.
// Layout: header tag string (no '\n', no trailing '\0'), then the first
// layer's inputDimension and outputDimension as ints, then per layer the raw
// weights (output x input) and biases (output x 1) as MatrixType values,
// followed by the next layer's output dimension — or a terminating 0 int
// after the last layer so loadModel's read loop ends cleanly.
static void prepareNeuralNetworkFile(const char *path, const NeuralNetwork nn) {
  FILE *f = fopen(path, "wb");
  if (f == NULL)
    return;
  // Header: exactly the tag string, without '\n' or a trailing '\0'.
  const char header[] = "__info2_neural_network_file_format__";
  fwrite(header, sizeof(char), strlen(header), f);
  if (nn.numberOfLayers == 0) {
    // No layers: terminate immediately with a 0 dimension so loadModel
    // stops before trying to read any layer data.
    int zero = 0;
    fwrite(&zero, sizeof(int), 1, f);
    fclose(f);
    return;
  }
  // First layer: inputDimension == weights.cols, outputDimension == weights.rows.
  int inputDim = (int)nn.layers[0].weights.cols;
  int outputDim = (int)nn.layers[0].weights.rows;
  fwrite(&inputDim, sizeof(int), 1, f);
  fwrite(&outputDim, sizeof(int), 1, f);
  for (int i = 0; i < nn.numberOfLayers; i++) {
    Layer layer = nn.layers[i];
    int wcount = (int)(layer.weights.rows * layer.weights.cols);
    int bcount = (int)(layer.biases.rows * layer.biases.cols); /* rows * 1 */
    // Raw binary weight values.
    if (wcount > 0 && layer.weights.buffer != NULL) {
      fwrite(layer.weights.buffer, sizeof(MatrixType), (size_t)wcount, f);
    }
    // Raw binary bias values.
    if (bcount > 0 && layer.biases.buffer != NULL) {
      fwrite(layer.biases.buffer, sizeof(MatrixType), (size_t)bcount, f);
    }
    if (i + 1 < nn.numberOfLayers) {
      // Between layers only the next output dimension is stored; the next
      // input dimension is implied by the current output dimension.
      int nextOutput = (int)nn.layers[i + 1].weights.rows;
      fwrite(&nextOutput, sizeof(int), 1, f);
    } else {
      // Last layer: write 0 so loadModel reads outputDimension == 0 and
      // leaves its loop.
      int zero = 0;
      fwrite(&zero, sizeof(int), 1, f);
    }
  }
  fclose(f);
}
void test_loadModelReturnsCorrectNumberOfLayers(void)
{
void test_loadModelReturnsCorrectNumberOfLayers(void) {
const char *path = "some__nn_test_file.info2";
MatrixType buffer1[] = {1, 2, 3, 4, 5, 6};
MatrixType buffer2[] = {1, 2, 3, 4, 5, 6};
Matrix weights1 = {.buffer=buffer1, .rows=3, .cols=2};
Matrix weights2 = {.buffer=buffer2, .rows=2, .cols=3};
Matrix weights1 = {.buffer = buffer1, .rows = 3, .cols = 2};
Matrix weights2 = {.buffer = buffer2, .rows = 2, .cols = 3};
MatrixType buffer3[] = {1, 2, 3};
MatrixType buffer4[] = {1, 2};
Matrix biases1 = {.buffer=buffer3, .rows=3, .cols=1};
Matrix biases2 = {.buffer=buffer4, .rows=2, .cols=1};
Layer layers[] = {{.weights=weights1, .biases=biases1}, {.weights=weights2, .biases=biases2}};
Matrix biases1 = {.buffer = buffer3, .rows = 3, .cols = 1};
Matrix biases2 = {.buffer = buffer4, .rows = 2, .cols = 1};
Layer layers[] = {{.weights = weights1, .biases = biases1},
{.weights = weights2, .biases = biases2}};
NeuralNetwork expectedNet = {.layers=layers, .numberOfLayers=2};
NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 2};
NeuralNetwork netUnderTest;
prepareNeuralNetworkFile(path, expectedNet);
@ -32,20 +91,20 @@ void test_loadModelReturnsCorrectNumberOfLayers(void)
netUnderTest = loadModel(path);
remove(path);
TEST_ASSERT_EQUAL_INT(expectedNet.numberOfLayers, netUnderTest.numberOfLayers);
TEST_ASSERT_EQUAL_INT(expectedNet.numberOfLayers,
netUnderTest.numberOfLayers);
clearModel(&netUnderTest);
}
void test_loadModelReturnsCorrectWeightDimensions(void)
{
void test_loadModelReturnsCorrectWeightDimensions(void) {
const char *path = "some__nn_test_file.info2";
MatrixType weightBuffer[] = {1, 2, 3, 4, 5, 6};
Matrix weights = {.buffer=weightBuffer, .rows=3, .cols=2};
Matrix weights = {.buffer = weightBuffer, .rows = 3, .cols = 2};
MatrixType biasBuffer[] = {7, 8, 9};
Matrix biases = {.buffer=biasBuffer, .rows=3, .cols=1};
Layer layers[] = {{.weights=weights, .biases=biases}};
Matrix biases = {.buffer = biasBuffer, .rows = 3, .cols = 1};
Layer layers[] = {{.weights = weights, .biases = biases}};
NeuralNetwork expectedNet = {.layers=layers, .numberOfLayers=1};
NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 1};
NeuralNetwork netUnderTest;
prepareNeuralNetworkFile(path, expectedNet);
@ -54,21 +113,22 @@ void test_loadModelReturnsCorrectWeightDimensions(void)
remove(path);
TEST_ASSERT_TRUE(netUnderTest.numberOfLayers > 0);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.rows, netUnderTest.layers[0].weights.rows);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.cols, netUnderTest.layers[0].weights.cols);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.rows,
netUnderTest.layers[0].weights.rows);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.cols,
netUnderTest.layers[0].weights.cols);
clearModel(&netUnderTest);
}
void test_loadModelReturnsCorrectBiasDimensions(void)
{
void test_loadModelReturnsCorrectBiasDimensions(void) {
const char *path = "some__nn_test_file.info2";
MatrixType weightBuffer[] = {1, 2, 3, 4, 5, 6};
Matrix weights = {.buffer=weightBuffer, .rows=3, .cols=2};
Matrix weights = {.buffer = weightBuffer, .rows = 3, .cols = 2};
MatrixType biasBuffer[] = {7, 8, 9};
Matrix biases = {.buffer=biasBuffer, .rows=3, .cols=1};
Layer layers[] = {{.weights=weights, .biases=biases}};
Matrix biases = {.buffer = biasBuffer, .rows = 3, .cols = 1};
Layer layers[] = {{.weights = weights, .biases = biases}};
NeuralNetwork expectedNet = {.layers=layers, .numberOfLayers=1};
NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 1};
NeuralNetwork netUnderTest;
prepareNeuralNetworkFile(path, expectedNet);
@ -77,21 +137,22 @@ void test_loadModelReturnsCorrectBiasDimensions(void)
remove(path);
TEST_ASSERT_TRUE(netUnderTest.numberOfLayers > 0);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].biases.rows, netUnderTest.layers[0].biases.rows);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].biases.cols, netUnderTest.layers[0].biases.cols);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].biases.rows,
netUnderTest.layers[0].biases.rows);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].biases.cols,
netUnderTest.layers[0].biases.cols);
clearModel(&netUnderTest);
}
void test_loadModelReturnsCorrectWeights(void)
{
void test_loadModelReturnsCorrectWeights(void) {
const char *path = "some__nn_test_file.info2";
MatrixType weightBuffer[] = {1, 2, 3, 4, 5, 6};
Matrix weights = {.buffer=weightBuffer, .rows=3, .cols=2};
Matrix weights = {.buffer = weightBuffer, .rows = 3, .cols = 2};
MatrixType biasBuffer[] = {7, 8, 9};
Matrix biases = {.buffer=biasBuffer, .rows=3, .cols=1};
Layer layers[] = {{.weights=weights, .biases=biases}};
Matrix biases = {.buffer = biasBuffer, .rows = 3, .cols = 1};
Layer layers[] = {{.weights = weights, .biases = biases}};
NeuralNetwork expectedNet = {.layers=layers, .numberOfLayers=1};
NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 1};
NeuralNetwork netUnderTest;
prepareNeuralNetworkFile(path, expectedNet);
@ -100,23 +161,26 @@ void test_loadModelReturnsCorrectWeights(void)
remove(path);
TEST_ASSERT_TRUE(netUnderTest.numberOfLayers > 0);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.rows, netUnderTest.layers[0].weights.rows);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.cols, netUnderTest.layers[0].weights.cols);
int n = netUnderTest.layers[0].weights.rows * netUnderTest.layers[0].weights.cols;
TEST_ASSERT_EQUAL_INT_ARRAY(expectedNet.layers[0].weights.buffer, netUnderTest.layers[0].weights.buffer, n);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.rows,
netUnderTest.layers[0].weights.rows);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.cols,
netUnderTest.layers[0].weights.cols);
int n =
netUnderTest.layers[0].weights.rows * netUnderTest.layers[0].weights.cols;
TEST_ASSERT_EQUAL_INT_ARRAY(expectedNet.layers[0].weights.buffer,
netUnderTest.layers[0].weights.buffer, n);
clearModel(&netUnderTest);
}
void test_loadModelReturnsCorrectBiases(void)
{
void test_loadModelReturnsCorrectBiases(void) {
const char *path = "some__nn_test_file.info2";
MatrixType weightBuffer[] = {1, 2, 3, 4, 5, 6};
Matrix weights = {.buffer=weightBuffer, .rows=3, .cols=2};
Matrix weights = {.buffer = weightBuffer, .rows = 3, .cols = 2};
MatrixType biasBuffer[] = {7, 8, 9};
Matrix biases = {.buffer=biasBuffer, .rows=3, .cols=1};
Layer layers[] = {{.weights=weights, .biases=biases}};
Matrix biases = {.buffer = biasBuffer, .rows = 3, .cols = 1};
Layer layers[] = {{.weights = weights, .biases = biases}};
NeuralNetwork expectedNet = {.layers=layers, .numberOfLayers=1};
NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 1};
NeuralNetwork netUnderTest;
prepareNeuralNetworkFile(path, expectedNet);
@ -125,21 +189,23 @@ void test_loadModelReturnsCorrectBiases(void)
remove(path);
TEST_ASSERT_TRUE(netUnderTest.numberOfLayers > 0);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.rows, netUnderTest.layers[0].weights.rows);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.cols, netUnderTest.layers[0].weights.cols);
int n = netUnderTest.layers[0].biases.rows * netUnderTest.layers[0].biases.cols;
TEST_ASSERT_EQUAL_INT_ARRAY(expectedNet.layers[0].biases.buffer, netUnderTest.layers[0].biases.buffer, n);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.rows,
netUnderTest.layers[0].weights.rows);
TEST_ASSERT_EQUAL_INT(expectedNet.layers[0].weights.cols,
netUnderTest.layers[0].weights.cols);
int n =
netUnderTest.layers[0].biases.rows * netUnderTest.layers[0].biases.cols;
TEST_ASSERT_EQUAL_INT_ARRAY(expectedNet.layers[0].biases.buffer,
netUnderTest.layers[0].biases.buffer, n);
clearModel(&netUnderTest);
}
void test_loadModelFailsOnWrongFileTag(void)
{
void test_loadModelFailsOnWrongFileTag(void) {
const char *path = "some_nn_test_file.info2";
NeuralNetwork netUnderTest;
FILE *file = fopen(path, "wb");
if(file != NULL)
{
if (file != NULL) {
const char *fileTag = "info2_neural_network_file_format";
fwrite(fileTag, sizeof(char), strlen(fileTag), file);
@ -155,16 +221,15 @@ void test_loadModelFailsOnWrongFileTag(void)
TEST_ASSERT_EQUAL_INT(0, netUnderTest.numberOfLayers);
}
void test_clearModelSetsMembersToNull(void)
{
void test_clearModelSetsMembersToNull(void) {
const char *path = "some__nn_test_file.info2";
MatrixType weightBuffer[] = {1, 2, 3, 4, 5, 6};
Matrix weights = {.buffer=weightBuffer, .rows=3, .cols=2};
Matrix weights = {.buffer = weightBuffer, .rows = 3, .cols = 2};
MatrixType biasBuffer[] = {7, 8, 9};
Matrix biases = {.buffer=biasBuffer, .rows=3, .cols=1};
Layer layers[] = {{.weights=weights, .biases=biases}};
Matrix biases = {.buffer = biasBuffer, .rows = 3, .cols = 1};
Layer layers[] = {{.weights = weights, .biases = biases}};
NeuralNetwork expectedNet = {.layers=layers, .numberOfLayers=1};
NeuralNetwork expectedNet = {.layers = layers, .numberOfLayers = 1};
NeuralNetwork netUnderTest;
prepareNeuralNetworkFile(path, expectedNet);
@ -179,36 +244,37 @@ void test_clearModelSetsMembersToNull(void)
TEST_ASSERT_EQUAL_INT(0, netUnderTest.numberOfLayers);
}
// Test activation: replaces every matrix entry with its absolute value,
// in place.
static void someActivation(Matrix *matrix) {
  for (int i = 0; i < matrix->rows * matrix->cols; i++) {
    matrix->buffer[i] = fabs(matrix->buffer[i]);
  }
}
void test_predictReturnsCorrectLabels(void)
{
void test_predictReturnsCorrectLabels(void) {
const unsigned char expectedLabels[] = {4, 2};
GrayScalePixelType imageBuffer1[] = {10, 30, 25, 17};
GrayScalePixelType imageBuffer2[] = {20, 40, 10, 128};
GrayScaleImage inputImages[] = {{.buffer=imageBuffer1, .width=2, .height=2}, {.buffer=imageBuffer2, .width=2, .height=2}};
GrayScaleImage inputImages[] = {
{.buffer = imageBuffer1, .width = 2, .height = 2},
{.buffer = imageBuffer2, .width = 2, .height = 2}};
MatrixType weightsBuffer1[] = {1, -2, 3, -4, 5, -6, 7, -8};
MatrixType weightsBuffer2[] = {-9, 10, 11, 12, 13, 14};
MatrixType weightsBuffer3[] = {-15, 16, 17, 18, -19, 20, 21, 22, 23, -24, 25, 26, 27, -28, -29};
Matrix weights1 = {.buffer=weightsBuffer1, .rows=2, .cols=4};
Matrix weights2 = {.buffer=weightsBuffer2, .rows=3, .cols=2};
Matrix weights3 = {.buffer=weightsBuffer3, .rows=5, .cols=3};
MatrixType weightsBuffer3[] = {-15, 16, 17, 18, -19, 20, 21, 22,
23, -24, 25, 26, 27, -28, -29};
Matrix weights1 = {.buffer = weightsBuffer1, .rows = 2, .cols = 4};
Matrix weights2 = {.buffer = weightsBuffer2, .rows = 3, .cols = 2};
Matrix weights3 = {.buffer = weightsBuffer3, .rows = 5, .cols = 3};
MatrixType biasBuffer1[] = {200, 0};
MatrixType biasBuffer2[] = {0, -100, 0};
MatrixType biasBuffer3[] = {0, -1000, 0, 2000, 0};
Matrix biases1 = {.buffer=biasBuffer1, .rows=2, .cols=1};
Matrix biases2 = {.buffer=biasBuffer2, .rows=3, .cols=1};
Matrix biases3 = {.buffer=biasBuffer3, .rows=5, .cols=1};
Layer layers[] = {{.weights=weights1, .biases=biases1, .activation=someActivation}, \
{.weights=weights2, .biases=biases2, .activation=someActivation}, \
{.weights=weights3, .biases=biases3, .activation=someActivation}};
NeuralNetwork netUnderTest = {.layers=layers, .numberOfLayers=3};
Matrix biases1 = {.buffer = biasBuffer1, .rows = 2, .cols = 1};
Matrix biases2 = {.buffer = biasBuffer2, .rows = 3, .cols = 1};
Matrix biases3 = {.buffer = biasBuffer3, .rows = 5, .cols = 1};
Layer layers[] = {
{.weights = weights1, .biases = biases1, .activation = someActivation},
{.weights = weights2, .biases = biases2, .activation = someActivation},
{.weights = weights3, .biases = biases3, .activation = someActivation}};
NeuralNetwork netUnderTest = {.layers = layers, .numberOfLayers = 3};
unsigned char *predictedLabels = predict(netUnderTest, inputImages, 2);
TEST_ASSERT_NOT_NULL(predictedLabels);
int n = (int)(sizeof(expectedLabels) / sizeof(expectedLabels[0]));
@ -224,11 +290,11 @@ void tearDown(void) {
// Hier kann Bereinigungsarbeit nach jedem Test durchgeführt werden
}
int main()
{
int main() {
UNITY_BEGIN();
printf("\n============================\nNeural network tests\n============================\n");
printf("\n============================\nNeural network "
"tests\n============================\n");
RUN_TEST(test_loadModelReturnsCorrectNumberOfLayers);
RUN_TEST(test_loadModelReturnsCorrectWeightDimensions);
RUN_TEST(test_loadModelReturnsCorrectBiasDimensions);