Merge pull request 'Adjustments to neuralnetwork' (#3) from Simon's-Branch into main
Reviewed-on: #3
This commit is contained in:
commit e844ca13cb
@@ -39,8 +39,8 @@ mnistVisualization.o: mnistVisualization.c
 matrixTests: matrix.o matrixTests.c
 	$(CC) $(CFLAGS) -I$(unityfolder) -o runMatrixTests matrixTests.c matrix.o $(BINARIES)/libunity.a
 
-neuralNetworkTests: neuralNetwork.o neuralNetworkTests.c
-	$(CC) $(CFLAGS) -I$(unityfolder) -o runNeuralNetworkTests neuralNetworkTests.c neuralNetwork.o $(BINARIES)/libunity.a
+neuralNetworkTests: neuralNetwork.o matrix.o neuralNetworkTests.c
+	$(CC) $(CFLAGS) -I$(unityfolder) -o runNeuralNetworkTests neuralNetworkTests.c neuralNetwork.o matrix.o $(BINARIES)/libunity.a
 
 imageInputTests: imageInput.o imageInputTests.c
 	$(CC) $(CFLAGS) -I$(unityfolder) -o runImageInputTests imageInputTests.c imageInput.o $(BINARIES)/libunity.a
@@ -20,7 +20,7 @@ static void softmax(Matrix *matrix)
 		for(int rowIdx = 0; rowIdx < matrix->rows; rowIdx++)
 		{
 			MatrixType expValue = exp(getMatrixAt(*matrix, rowIdx, colIdx));
-			setMatrixAt(expValue, *matrix, rowIdx, colIdx);
+			setMatrixAt(expValue, matrix, rowIdx, colIdx);
 			colSums[colIdx] += expValue;
 		}
 	}
@@ -30,7 +30,7 @@ static void softmax(Matrix *matrix)
 		for(int rowIdx = 0; rowIdx < matrix->rows; rowIdx++)
 		{
 			MatrixType normalizedValue = getMatrixAt(*matrix, rowIdx, colIdx) / colSums[colIdx];
-			setMatrixAt(normalizedValue, *matrix, rowIdx, colIdx);
+			setMatrixAt(normalizedValue, matrix, rowIdx, colIdx);
 		}
 	}
 	free(colSums);
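Both softmax hunks replace setMatrixAt(..., *matrix, ...) with setMatrixAt(..., matrix, ...), so the setter now receives a pointer to the matrix rather than a dereferenced copy. Below is a minimal, self-contained sketch of a column-wise softmax built around accessor signatures that match the calls in the diff (getMatrixAt takes the matrix by value, setMatrixAt takes a pointer). The MatrixType alias, the row-major layout, and the accessor bodies are assumptions for illustration, not the repository's actual matrix.h.

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

typedef double MatrixType;                                      /* assumed element type */
typedef struct { int rows, cols; MatrixType *buffer; } Matrix;  /* assumed row-major layout */

static MatrixType getMatrixAt(Matrix m, int row, int col)       /* by value, as in getMatrixAt(*matrix, ...) */
{
    return m.buffer[row * m.cols + col];
}

static void setMatrixAt(MatrixType value, Matrix *m, int row, int col)  /* by pointer, as in the fixed calls */
{
    m->buffer[row * m->cols + col] = value;
}

/* Column-wise softmax: every column is exponentiated, then normalized by its column sum. */
static void softmax(Matrix *matrix)
{
    MatrixType *colSums = calloc(matrix->cols, sizeof(MatrixType));

    for (int colIdx = 0; colIdx < matrix->cols; colIdx++)
    {
        for (int rowIdx = 0; rowIdx < matrix->rows; rowIdx++)
        {
            MatrixType expValue = exp(getMatrixAt(*matrix, rowIdx, colIdx));
            setMatrixAt(expValue, matrix, rowIdx, colIdx);      /* pass the pointer, not *matrix */
            colSums[colIdx] += expValue;
        }
    }

    for (int colIdx = 0; colIdx < matrix->cols; colIdx++)
    {
        for (int rowIdx = 0; rowIdx < matrix->rows; rowIdx++)
        {
            MatrixType normalizedValue = getMatrixAt(*matrix, rowIdx, colIdx) / colSums[colIdx];
            setMatrixAt(normalizedValue, matrix, rowIdx, colIdx);
        }
    }

    free(colSums);
}

int main(void)
{
    MatrixType data[4] = { 1.0, 2.0, 0.5, 0.5 };
    Matrix m = { 2, 2, data };
    softmax(&m);                                                /* each column now sums to 1 */
    printf("%f %f\n%f %f\n", data[0], data[1], data[2], data[3]);
    return 0;
}

Under these assumed signatures, passing *matrix to a parameter declared Matrix * would not even compile, which is presumably what the pointer form in the patch addresses.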
@@ -182,7 +182,7 @@ static Matrix imageBatchToMatrixOfImageVectors(const GrayScaleImage images[], un
 	{
 		for(int j = 0; j < images[i].width * images[i].height; j++)
 		{
-			setMatrixAt((MatrixType)images[i].buffer[j], matrix, j, i);
+			setMatrixAt((MatrixType)images[i].buffer[j], &matrix, j, i);
 		}
 	}
 }
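The same pattern shows up here: matrix is a local Matrix value inside imageBatchToMatrixOfImageVectors, so the setter needs its address (&matrix). Reusing the Matrix, MatrixType, and setMatrixAt sketch from the softmax example above, a rough outline of the one-image-per-column fill could look like this; the GrayScaleImage fields and the allocation are assumptions, not the repository's definitions.

typedef struct { unsigned int width, height; unsigned char *buffer; } GrayScaleImage;  /* assumed shape */

/* Flattens each grayscale image into one column: pixel j of image i lands at (row j, column i). */
static Matrix imageBatchToMatrixOfImageVectors(const GrayScaleImage images[], unsigned int numberOfImages)
{
    Matrix matrix;
    matrix.rows = (int)(images[0].width * images[0].height);
    matrix.cols = (int)numberOfImages;
    matrix.buffer = malloc((size_t)matrix.rows * (size_t)matrix.cols * sizeof(MatrixType));

    for (unsigned int i = 0; i < numberOfImages; i++)
    {
        for (int j = 0; j < (int)(images[i].width * images[i].height); j++)
        {
            /* matrix is a local value, so its address is passed to the setter */
            setMatrixAt((MatrixType)images[i].buffer[j], &matrix, j, i);
        }
    }
    return matrix;
}

Since the assumed Matrix carries its data as a buffer pointer, returning it by value only copies the small header struct, not the pixel data.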
@@ -23,49 +23,49 @@
 // 2) Make sure that all unit tests pass successfully.
 // make neuralNetworkTests && ./runNeuralNetworkTests
 
 
 static void prepareNeuralNetworkFile(const char *path, const NeuralNetwork nn)
 {
-	// First Draft
-
-	// 1. Open the file in binary write mode
 	FILE *file = fopen(path, "wb");
 	if (file == NULL) {
 		perror("Fehler beim Öffnen der Datei");
 		return;
 	}
 
-	// 2. Write the identification tag
-	const char *fileTag = "__info2_neural_network_file_format__";
-	fwrite(fileTag, sizeof(char), strlen(fileTag), file);
+	// 1. Write the header
+	const char *fileHeader = "__info2_neural_network_file_format__";
+	fwrite(fileHeader, sizeof(char), strlen(fileHeader), file);
 
-	// 3. Write the number of layers
-	fwrite(&nn.numberOfLayers, sizeof(int), 1, file);
+	// Check whether any layers exist
+	if (nn.numberOfLayers > 0)
+	{
+		// 2. Write the input dimension of the VERY FIRST layer
+		// (these are the columns of the first layer's weight matrix)
+		int inputDim = nn.layers[0].weights.cols;
+		fwrite(&inputDim, sizeof(int), 1, file);
 
-	// 4. Loop over all layers to write their data
-	for (int i = 0; i < nn.numberOfLayers; i++) {
-		Layer currentLayer = nn.layers[i];
+		// 3. Iterate over all layers
+		for(int i = 0; i < nn.numberOfLayers; i++)
+		{
+			// Write this layer's output dimension (rows of the weight matrix)
+			int outputDim = nn.layers[i].weights.rows;
+			fwrite(&outputDim, sizeof(int), 1, file);
 
-		// 4a. Write the data of the weight matrix (weights)
-		Matrix weights = currentLayer.weights;
-		int weightElements = weights.rows * weights.cols;
+			// 4. Write the weights (only the buffer, no dimensions anymore!)
+			// loadModel already knows the size of the matrix from inputDim and outputDim.
+			int weightsCount = nn.layers[i].weights.rows * nn.layers[i].weights.cols;
+			fwrite(nn.layers[i].weights.buffer, sizeof(MatrixType), weightsCount, file);
 
-		// Write the dimensions (rows, columns)
-		fwrite(&weights.rows, sizeof(int), 1, file);
-		fwrite(&weights.cols, sizeof(int), 1, file);
-		// Write the data buffer (the actual numbers)
-		fwrite(weights.buffer, sizeof(MatrixType), weightElements, file);
-
-		// 4b. Write the data of the bias matrix (biases)
-		Matrix biases = currentLayer.biases;
-		int biasElements = biases.rows * biases.cols;
-
-		// Write the dimensions (rows, columns)
-		fwrite(&biases.rows, sizeof(int), 1, file);
-		fwrite(&biases.cols, sizeof(int), 1, file);
-		// Write the data buffer
-		fwrite(biases.buffer, sizeof(MatrixType), biasElements, file);
+			// 5. Write the biases (only the buffer)
+			int biasCount = nn.layers[i].biases.rows * nn.layers[i].biases.cols;
+			fwrite(nn.layers[i].biases.buffer, sizeof(MatrixType), biasCount, file);
+		}
 	}
-	// 5. Close the file
 
+	// 6. Write a 0 to signal the end of the dimensions
+	// (loadModel exits its while loop when readDimension returns 0)
+	int stopMark = 0;
+	fwrite(&stopMark, sizeof(int), 1, file);
 
 	fclose(file);
 }
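The comments above only describe the reading side indirectly: loadModel derives each weight matrix's size from inputDim and outputDim and stops once it reads a dimension of 0. Purely as an illustration of that contract, here is a hypothetical reader loop; the function name readLayers, the assumption that the header has already been consumed, that the first int after it is the input dimension, and that each bias matrix is outputDim x 1 are all guesses and not the repository's actual loadModel.

#include <stdio.h>
#include <stdlib.h>

typedef double MatrixType;  /* assumed element type */

/* Hypothetical reader for the layout written by prepareNeuralNetworkFile above. */
static void readLayers(FILE *file)
{
    int inputDim = 0;
    if (fread(&inputDim, sizeof(int), 1, file) != 1)
        return;

    int previousDim = inputDim;
    int outputDim = 0;

    /* A dimension of 0 is the stop mark, mirroring the comment in the writer. */
    while (fread(&outputDim, sizeof(int), 1, file) == 1 && outputDim != 0)
    {
        size_t weightsCount = (size_t)outputDim * (size_t)previousDim;
        size_t biasCount = (size_t)outputDim;                  /* assumed outputDim x 1 */

        MatrixType *weights = malloc(weightsCount * sizeof(MatrixType));
        MatrixType *biases = malloc(biasCount * sizeof(MatrixType));

        if (fread(weights, sizeof(MatrixType), weightsCount, file) != weightsCount ||
            fread(biases, sizeof(MatrixType), biasCount, file) != biasCount)
        {
            free(weights);
            free(biases);
            return;
        }

        /* ... hand weights/biases over to whatever builds the layer ... */
        free(weights);
        free(biases);

        previousDim = outputDim;                               /* next layer's columns = this layer's rows */
    }
}

The chaining (previousDim = outputDim) is what makes storing only the raw buffers sufficient, which is exactly the point of the writer's comment about loadModel knowing the matrix size from inputDim and outputDim.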