Solved remaining 2 Neural Tests
parent 55603bf12c
commit 4b2cbfb836
matrix.c: 140 changed lines
@@ -3,21 +3,18 @@
 #include <string.h>
 #include "matrix.h"
 
-// TODO implement the matrix functions
-
 Matrix createMatrix(unsigned int rows, unsigned int cols)
 {
     if (rows != 0 && cols != 0)
     {
         Matrix matrix;
         matrix.rows = rows;
         matrix.cols = cols;
-        matrix.buffer = (float*) calloc(rows * cols, sizeof(float)); // calloc zero-initializes the allocated storage
+        matrix.buffer = (MatrixType*) calloc((size_t)rows * cols, sizeof(MatrixType));
         return matrix;
     }
     else
-    { // for a "wrong" matrix, return an empty one without allocating memory
-        printf("Nullgroesse der Matrix!!!\n");
+    {
         Matrix matrix;
         matrix.rows = 0;
         matrix.cols = 0;
@@ -28,87 +25,105 @@ Matrix createMatrix(unsigned int rows, unsigned int cols)
 
 void clearMatrix(Matrix *matrix)
 {
-    // safety check for the pointer that was passed in
-    if (matrix != NULL)
-    {
-        // **IMPORTANT FIX:** check the buffer before calling free()
-        if (matrix->buffer != NULL) {
-            free(matrix->buffer);
-        }
+    if (matrix != NULL)
+    {
+        if (matrix->buffer != NULL) {
+            free(matrix->buffer);
+        }
 
-        matrix->buffer = NULL; // set the pointer to NULL
+        matrix->buffer = NULL;
         matrix->rows = 0;
         matrix->cols = 0;
     }
 }
 
 void setMatrixAt(MatrixType value, Matrix matrix, unsigned int rowIdx, unsigned int colIdx)
 {
-    matrix.buffer[rowIdx * matrix.cols + colIdx] = value;
+    matrix.buffer[(size_t)rowIdx * matrix.cols + colIdx] = value;
 }
 
 MatrixType getMatrixAt(const Matrix matrix, unsigned int rowIdx, unsigned int colIdx)
 {
     if(rowIdx < matrix.rows && colIdx < matrix.cols){
-        return matrix.buffer[rowIdx * matrix.cols + colIdx]; // NOTE: rowIdx and colIdx are meant as array positions, matrix.cols as a plain count!
+        return matrix.buffer[(size_t)rowIdx * matrix.cols + colIdx];
     }else{
         return 0;
     }
 }
 
 Matrix add(const Matrix matrix1, const Matrix matrix2)
 {
-    // check whether the matrices have the same dimensions
-    // if not, the empty matrix (rows/cols = 0 and buffer = NULL) must be returned
+    // Case A: same shape -> elementwise add
+    if (matrix1.rows == matrix2.rows && matrix1.cols == matrix2.cols)
+    {
+        Matrix result = createMatrix(matrix1.rows, matrix1.cols);
+        if (result.buffer == NULL) return result;
 
-    if (matrix1.rows != matrix2.rows || matrix1.cols != matrix2.cols)
-    {
-        Matrix result = {0}; // initialize the struct to 0/NULL
-        result.rows = 0;
-        result.cols = 0;
-        result.buffer = NULL;
-        return result;
-    }
+        size_t n = (size_t)result.rows * result.cols;
+        for (size_t i = 0; i < n; i++)
+        {
+            result.buffer[i] = matrix1.buffer[i] + matrix2.buffer[i];
+        }
+        return result;
+    }
 
-    else
-    {
-        // **IMPORTANT FIX:** allocate memory for the result
+    // Case B: matrix1 has shape (rows x cols) and matrix2 is (rows x 1) -> broadcast second across columns
+    if (matrix1.rows == matrix2.rows && matrix2.cols == 1 && matrix1.cols > 1)
+    {
         Matrix result = createMatrix(matrix1.rows, matrix1.cols);
+        if (result.buffer == NULL) return result;
 
-        // check whether the memory allocation succeeded
-        if (result.buffer == NULL) {
-            return result; // return the null matrix if malloc failed
-        }
+        for (unsigned int r = 0; r < matrix1.rows; r++)
+        {
+            MatrixType b = matrix2.buffer[(size_t)r * matrix2.cols + 0];
+            for (unsigned int c = 0; c < matrix1.cols; c++)
+            {
+                result.buffer[(size_t)r * result.cols + c] = matrix1.buffer[(size_t)r * matrix1.cols + c] + b;
+            }
+        }
+        return result;
+    }
 
-        // add the two matrices
-        for (unsigned int i = 0; i < result.rows * result.cols; i++)
+    // Case C: matrix1 is (rows x 1) and matrix2 is (rows x cols) -> broadcast first across columns
+    if (matrix2.rows == matrix1.rows && matrix1.cols == 1 && matrix2.cols > 1)
     {
-            // Note: if you do not pass the matrices via const pointer,
-            // be aware that the data is not temporary (fine here, since these are local copies).
-            result.buffer[i] = matrix1.buffer[i] + matrix2.buffer[i];
-        }
+        Matrix result = createMatrix(matrix2.rows, matrix2.cols);
+        if (result.buffer == NULL) return result;
 
-        return result;
-    }
 
+        for (unsigned int r = 0; r < matrix2.rows; r++)
+        {
+            MatrixType b = matrix1.buffer[(size_t)r * matrix1.cols + 0];
+            for (unsigned int c = 0; c < matrix2.cols; c++)
+            {
+                result.buffer[(size_t)r * result.cols + c] = matrix2.buffer[(size_t)r * matrix2.cols + c] + b;
+            }
+        }
+        return result;
+    }
 
+    // unsupported shapes -> return empty matrix
+    Matrix result = {0};
+    result.rows = 0;
+    result.cols = 0;
+    result.buffer = NULL;
+    return result;
 }
 
 Matrix multiply(const Matrix matrix1, const Matrix matrix2)
 {
-    // the columns of matrix 1 must match the rows of matrix 2
     if (matrix1.cols != matrix2.rows)
     {
         Matrix result;
         result.rows = 0;
         result.cols = 0;
         result.buffer = NULL;
         return result;
     }
     else
     {
         Matrix result = createMatrix(matrix1.rows, matrix2.cols);
+        if (result.buffer == NULL) return result;
 
         for (unsigned int i = 0; i < result.rows; i++)
         {
             for (unsigned int j = 0; j < result.cols; j++)
@@ -122,6 +137,5 @@ Matrix multiply(const Matrix matrix1, const Matrix matrix2)
             }
         }
         return result;
+    }
 }
-}
neuralNetwork.c: 290 changed lines
@@ -2,39 +2,41 @@
 #include <stdio.h>
 #include <math.h>
 #include <string.h>
+#include <stdint.h>
 #include "neuralNetwork.h"
 
-#define BUFFER_SIZE 100
+#define BUFFER_SIZE 200
 #define FILE_HEADER_STRING "__info2_neural_network_file_format__"
 
 static void softmax(Matrix *matrix)
 {
     if(matrix->cols > 0)
     {
-        double *colSums = (double *)calloc(matrix->cols, sizeof(double));
+        double *colSums = (double *)calloc((size_t)matrix->cols, sizeof(double));
+        if(colSums == NULL) return;
 
-        if(colSums != NULL)
+        for(int colIdx = 0; colIdx < matrix->cols; colIdx++)
         {
-            for(int colIdx = 0; colIdx < matrix->cols; colIdx++)
+            for(int rowIdx = 0; rowIdx < matrix->rows; rowIdx++)
             {
-                for(int rowIdx = 0; rowIdx < matrix->rows; rowIdx++)
-                {
-                    MatrixType expValue = exp(getMatrixAt(*matrix, rowIdx, colIdx));
-                    setMatrixAt(expValue, *matrix, rowIdx, colIdx);
-                    colSums[colIdx] += expValue;
-                }
+                MatrixType expValue = (MatrixType)exp(getMatrixAt(*matrix, rowIdx, colIdx));
+                setMatrixAt(expValue, *matrix, rowIdx, colIdx);
+                colSums[colIdx] += expValue;
             }
 
-            for(int colIdx = 0; colIdx < matrix->cols; colIdx++)
-            {
-                for(int rowIdx = 0; rowIdx < matrix->rows; rowIdx++)
-                {
-                    MatrixType normalizedValue = getMatrixAt(*matrix, rowIdx, colIdx) / colSums[colIdx];
-                    setMatrixAt(normalizedValue, *matrix, rowIdx, colIdx);
-                }
-            }
-            free(colSums);
         }
 
+        for(int colIdx = 0; colIdx < matrix->cols; colIdx++)
+        {
+            double s = colSums[colIdx];
+            if(s == 0.0) s = 1.0;
+            for(int rowIdx = 0; rowIdx < matrix->rows; rowIdx++)
+            {
+                MatrixType normalizedValue = (MatrixType)(getMatrixAt(*matrix, rowIdx, colIdx) / s);
+                setMatrixAt(normalizedValue, *matrix, rowIdx, colIdx);
+            }
+        }
 
+        free(colSums);
     }
 }
 
@@ -46,39 +48,53 @@ static void relu(Matrix *matrix)
     }
 }
 
+/* Checks the file header. Returns 1 on success, 0 on error. */
 static int checkFileHeader(FILE *file)
 {
-    int isValid = 0;
-    int fileHeaderLen = strlen(FILE_HEADER_STRING);
-    char buffer[BUFFER_SIZE] = {0};
+    if(file == NULL) return 0;
 
-    if(BUFFER_SIZE-1 < fileHeaderLen)
-        fileHeaderLen = BUFFER_SIZE-1;
+    size_t headerLen = strlen(FILE_HEADER_STRING);
+    if(headerLen == 0 || headerLen >= BUFFER_SIZE) return 0;
 
-    if(fread(buffer, sizeof(char), fileHeaderLen, file) == fileHeaderLen)
-        isValid = strcmp(buffer, FILE_HEADER_STRING) == 0;
+    char buffer[BUFFER_SIZE];
+    if(fseek(file, 0, SEEK_SET) != 0) return 0;
+    if(fread(buffer, sizeof(char), headerLen, file) != headerLen) return 0;
+    if(memcmp(buffer, FILE_HEADER_STRING, headerLen) != 0) return 0;
 
-    return isValid;
+    return 1;
 }
 
+/* Reads one dimension (written by the test writer with sizeof(int)) and checks plausibility.
+   Returns 0 on a read error or when the value read obviously does not fit the remaining file size.
+*/
 static unsigned int readDimension(FILE *file)
 {
-    int dimension = 0;
+    if (file == NULL) return 0;
 
-    if(fread(&dimension, sizeof(int), 1, file) != 1)
-        dimension = 0;
+    int value = 0;
+    if (fread(&value, sizeof(int), 1, file) != 1) {
+        return 0;
+    }
 
-    return dimension;
+    if (value < 0) return 0;
+    return (unsigned int)value;
 }
 
 
+/* Reads a rows x cols matrix; on a read error, return an empty matrix */
 static Matrix readMatrix(FILE *file, unsigned int rows, unsigned int cols)
 {
     Matrix matrix = createMatrix(rows, cols);
+    if(matrix.buffer == NULL) return matrix;
 
-    if(matrix.buffer != NULL)
+    size_t toRead = (size_t)rows * cols;
+    if(toRead > 0)
     {
-        if(fread(matrix.buffer, sizeof(MatrixType), rows*cols, file) != rows*cols)
+        size_t readCount = fread(matrix.buffer, sizeof(MatrixType), toRead, file);
+        if(readCount != toRead)
+        {
             clearMatrix(&matrix);
+        }
     }
 
     return matrix;
@@ -87,91 +103,113 @@ static Matrix readMatrix(FILE *file, unsigned int rows, unsigned int cols)
 static Layer readLayer(FILE *file, unsigned int inputDimension, unsigned int outputDimension)
 {
     Layer layer;
+    layer.activation = NULL;
     layer.weights = readMatrix(file, outputDimension, inputDimension);
     layer.biases = readMatrix(file, outputDimension, 1);
 
     return layer;
 }
 
 static int isEmptyLayer(const Layer layer)
 {
-    return layer.biases.cols == 0 || layer.biases.rows == 0 || layer.biases.buffer == NULL || layer.weights.rows == 0 || layer.weights.cols == 0 || layer.weights.buffer == NULL;
+    return layer.biases.cols == 0 || layer.biases.rows == 0 || layer.biases.buffer == NULL ||
+           layer.weights.rows == 0 || layer.weights.cols == 0 || layer.weights.buffer == NULL;
 }
 
 static void clearLayer(Layer *layer)
 {
-    if(layer != NULL)
-    {
-        clearMatrix(&layer->weights);
-        clearMatrix(&layer->biases);
-        layer->activation = NULL;
-    }
+    if(layer == NULL) return;
+    clearMatrix(&layer->weights);
+    clearMatrix(&layer->biases);
+    layer->activation = NULL;
 }
 
 static void assignActivations(NeuralNetwork model)
 {
-    for(int i = 0; i < (int)model.numberOfLayers-1; i++)
-    {
+    if(model.numberOfLayers == 0) return;
+    for(int i = 0; i < (int)model.numberOfLayers - 1; i++)
         model.layers[i].activation = relu;
-    }
+    model.layers[model.numberOfLayers - 1].activation = softmax;
 
-    if(model.numberOfLayers > 0)
-        model.layers[model.numberOfLayers-1].activation = softmax;
 }
 
 NeuralNetwork loadModel(const char *path)
 {
     NeuralNetwork model = {NULL, 0};
     FILE *file = fopen(path, "rb");
+    if(file == NULL) return model;
 
-    if(file != NULL)
+    if(fseek(file, 0, SEEK_SET) != 0)
     {
-        if(checkFileHeader(file))
-        {
-            unsigned int inputDimension = readDimension(file);
-            unsigned int outputDimension = readDimension(file);
-
-            while(inputDimension > 0 && outputDimension > 0)
-            {
-                Layer layer = readLayer(file, inputDimension, outputDimension);
-                Layer *layerBuffer = NULL;
-
-                if(isEmptyLayer(layer))
-                {
-                    clearLayer(&layer);
-                    clearModel(&model);
-                    break;
-                }
-
-                layerBuffer = (Layer *)realloc(model.layers, (model.numberOfLayers + 1) * sizeof(Layer));
-
-                if(layerBuffer != NULL)
-                    model.layers = layerBuffer;
-                else
-                {
-                    clearModel(&model);
-                    break;
-                }
-
-                model.layers[model.numberOfLayers] = layer;
-                model.numberOfLayers++;
-
-                inputDimension = outputDimension;
-                outputDimension = readDimension(file);
-            }
-        }
         fclose(file);
-        assignActivations(model);
+        return model;
     }
 
+    if(!checkFileHeader(file))
+    {
+        fclose(file);
+        return model;
+    }
+
+    /* Read the first dimension pair (input, output) */
+    unsigned int inputDimension = readDimension(file);
+    unsigned int outputDimension = readDimension(file);
+
+    fprintf(stderr, "[loadModel] first dims: input=%u output=%u\n", inputDimension, outputDimension);
+
+    while (inputDimension > 0 && outputDimension > 0)
+    {
+        Layer layer = readLayer(file, inputDimension, outputDimension);
+
+        if (isEmptyLayer(layer))
+        {
+            clearLayer(&layer);
+            clearModel(&model);
+            break;
+        }
+
+        Layer *tmp = (Layer *)realloc(model.layers, (model.numberOfLayers + 1) * sizeof(Layer));
+        if (tmp == NULL)
+        {
+            clearLayer(&layer);
+            clearModel(&model);
+            break;
+        }
+        model.layers = tmp;
+        model.layers[model.numberOfLayers] = layer;
+        model.numberOfLayers++;
+
+        fprintf(stderr, "[loadModel] loaded layer %d: weights %u x %u, biases %u x %u\n",
+                model.numberOfLayers,
+                layer.weights.rows, layer.weights.cols,
+                layer.biases.rows, layer.biases.cols);
+
+        /* Read the next dimension pair (the writer emits one pair per layer) */
+        unsigned int nextInput = readDimension(file);
+        unsigned int nextOutput = readDimension(file);
+
+        fprintf(stderr, "[loadModel] next raw dims read: nextInput=%u nextOutput=%u\n", nextInput, nextOutput);
+
+        /* If the next pair is (0,0) -> done */
+        if (nextInput == 0 || nextOutput == 0)
+        {
+            inputDimension = 0;
+            outputDimension = 0;
+            break;
+        }
+
+        /* Set up the next iteration */
+        inputDimension = nextInput;
+        outputDimension = nextOutput;
+
+        fprintf(stderr, "[loadModel] next dims: input=%u output=%u\n", inputDimension, outputDimension);
+    }
+
+    fclose(file);
+    assignActivations(model);
     return model;
 }
 
 static Matrix imageBatchToMatrixOfImageVectors(const GrayScaleImage images[], unsigned int count)
 {
-    //Matrix matrix = {NULL, 0, 0};
-    // use explicit initialization to avoid relying on the field order in matrix.h:
     Matrix matrix;
     matrix.buffer = NULL;
     matrix.rows = 0;
@@ -180,15 +218,12 @@ static Matrix imageBatchToMatrixOfImageVectors(const GrayScaleImage images[], unsigned int count)
     if(count > 0 && images != NULL)
     {
         matrix = createMatrix(images[0].height * images[0].width, count);
 
         if(matrix.buffer != NULL)
        {
-            for(int i = 0; i < count; i++)
+            for(unsigned int i = 0; i < count; i++)
             {
-                for(int j = 0; j < images[i].width * images[i].height; j++)
-                {
+                for(unsigned int j = 0; j < images[i].width * images[i].height; j++)
                     setMatrixAt((MatrixType)images[i].buffer[j], matrix, j, i);
-                }
             }
         }
     }
@@ -199,23 +234,20 @@
 static Matrix forward(const NeuralNetwork model, Matrix inputBatch)
 {
     Matrix result = inputBatch;
+    if(result.buffer == NULL) return result;
 
-    if(result.buffer != NULL)
+    for(int i = 0; i < model.numberOfLayers; i++)
     {
-        for(int i = 0; i < model.numberOfLayers; i++)
-        {
-            Matrix biasResult;
-            Matrix weightResult;
+        Matrix weightResult = multiply(model.layers[i].weights, result);
+        clearMatrix(&result);
 
-            weightResult = multiply(model.layers[i].weights, result);
-            clearMatrix(&result);
-            biasResult = add(model.layers[i].biases, weightResult);
-            clearMatrix(&weightResult);
+        Matrix biasResult = add(weightResult, model.layers[i].biases);
+        clearMatrix(&weightResult);
 
         if(model.layers[i].activation != NULL)
             model.layers[i].activation(&biasResult);
-            result = biasResult;
-        }
+        result = biasResult;
     }
 
     return result;
@@ -223,27 +255,21 @@ static Matrix forward(const NeuralNetwork model, Matrix inputBatch)
 
 unsigned char *argmax(const Matrix matrix)
 {
-    unsigned char *maxIdx = NULL;
+    if(matrix.rows == 0 || matrix.cols == 0) return NULL;
 
-    if(matrix.rows > 0 && matrix.cols > 0)
+    unsigned char *maxIdx = (unsigned char *)malloc((size_t)matrix.cols * sizeof(unsigned char));
+    if(maxIdx == NULL) return NULL;
 
+    for(int colIdx = 0; colIdx < matrix.cols; colIdx++)
     {
-        maxIdx = (unsigned char *)malloc(sizeof(unsigned char) * matrix.cols);
-        if(maxIdx != NULL)
+        int best = 0;
+        for(int rowIdx = 1; rowIdx < matrix.rows; rowIdx++)
         {
-            for(int colIdx = 0; colIdx < matrix.cols; colIdx++)
-            {
-                maxIdx[colIdx] = 0;
+            if(getMatrixAt(matrix, rowIdx, colIdx) > getMatrixAt(matrix, best, colIdx))
+                best = rowIdx;
 
-                for(int rowIdx = 1; rowIdx < matrix.rows; rowIdx++)
-                {
-                    if(getMatrixAt(matrix, rowIdx, colIdx) > getMatrixAt(matrix, maxIdx[colIdx], colIdx))
-                        maxIdx[colIdx] = rowIdx;
-                }
-            }
         }
+        maxIdx[colIdx] = (unsigned char)best;
     }
 
     return maxIdx;
 }
 
@@ -251,23 +277,17 @@ unsigned char *predict(const NeuralNetwork model, const GrayScaleImage images[], unsigned int numberOfImages)
 {
     Matrix inputBatch = imageBatchToMatrixOfImageVectors(images, numberOfImages);
     Matrix outputBatch = forward(model, inputBatch);
 
     unsigned char *result = argmax(outputBatch);
 
     clearMatrix(&outputBatch);
 
     return result;
 }
 
 void clearModel(NeuralNetwork *model)
 {
-    if(model != NULL)
-    {
-        for(int i = 0; i < model->numberOfLayers; i++)
-        {
-            clearLayer(&model->layers[i]);
-        }
-        model->layers = NULL;
-        model->numberOfLayers = 0;
-    }
-}
+    if(model == NULL) return;
+    for(int i = 0; i < model->numberOfLayers; i++)
+        clearLayer(&model->layers[i]);
+    free(model->layers);
+    model->layers = NULL;
+    model->numberOfLayers = 0;
+}