From 9a05ae38410ac35904deb89fa215938b2fece01f Mon Sep 17 00:00:00 2001 From: Marcel Plch Date: Fri, 27 Dec 2024 10:41:03 +0100 Subject: [PATCH] Repaired data handling I have no clue how these bugs happened. I have no clue what was the issue exactly. All I know is that I have multiplied wrong indices with wrong dimensions. Guess even this is maths sometimes. --- include/neural.h | 1 + src/cx.c | 4 +++- src/neural.c | 33 +++++++++++++++++++++++---------- 3 files changed, 27 insertions(+), 11 deletions(-) diff --git a/include/neural.h b/include/neural.h index d2aafd4..ce3fab6 100644 --- a/include/neural.h +++ b/include/neural.h @@ -27,6 +27,7 @@ typedef struct _neural_data { Neural_Network *neural_new(size_t, size_t, size_t); void neural_free(Neural_Network *); +void neural_populate_sequential(Neural_Network *); void neural_randomize(Neural_Network *); float *neural_loadData(Neural_Network *, const char *); float *neural_process(Neural_Network *, float *); diff --git a/src/cx.c b/src/cx.c index 493e932..e51e212 100644 --- a/src/cx.c +++ b/src/cx.c @@ -183,7 +183,7 @@ cx_nninit(Neural_Network **nn) { } // Populate the neural network with sensible values. 
- neural_randomize(*nn); + neural_populate_sequential(*nn); return 0; } @@ -306,6 +306,8 @@ cx_run(CX_Context *ctx) { pthread_join(tg[1]->group_manager->thread, &neural_xml); + printf("%s\n", neural_xml); + ctx->gl_ctx->master_lock = 0; neural_getMesh(ctx->nn_ctx->nn, ctx->gl_ctx->mr); diff --git a/src/neural.c b/src/neural.c index 32ec5f1..b25a4f0 100644 --- a/src/neural.c +++ b/src/neural.c @@ -67,6 +67,22 @@ neural_free(Neural_Network *self) { free(self); } +void +neural_populate_sequential(Neural_Network *self) { + Neural_Layer *nl; + + for (int i = 0; i < self->layer_count; i++) { + nl = self->layers[i]; + int populator = 0; + for (int j = 0; j < nl->layer_size; j++) { + for (int k = 0; k < nl->layer_size_next; k++) { + nl->neurons[j].synapses[k] = (float)populator; + populator++; + } + } + } +} + void neural_randomize(Neural_Network *self) { FILE *f; @@ -222,6 +238,10 @@ neural_data_new(int layer_size, int layer_size_next) { * sizeof(float)); self->mat_len = layer_size_next; } + else { + self->synapse_matrix = NULL; + self->mat_len = 0; + } return self; } @@ -230,20 +250,13 @@ neural_getData(Neural_Network *self, size_t layer) { Neural_Layer *nl; Neural_Data *retval; - nl = self->layers[layer]; retval = neural_data_new(nl->layer_size, nl->layer_size_next); - - retval->vect_len = nl->layer_size; - if (!nl->layer_size_next) { - retval->synapse_matrix = NULL; - retval->mat_len = 0; - } - else { + if (retval->mat_len) { for (int i = 0; i < nl->layer_size; i++) { for (int j = 0; j < nl->layer_size_next; j++) { - retval->synapse_matrix[i*j+i] = nl->neurons[i].synapses[j]; + retval->synapse_matrix[i+(nl->layer_size*j)] = nl->neurons[i].synapses[j]; } } } @@ -381,7 +394,7 @@ neural_getXML(Neural_Network *nn) { line_prep = strcat(line_prep, "[ "); for (int k = 0; k < nd->vect_len; k++) { - strfromf(number_buffer, 32, "%.2f ", nd->synapse_matrix[k+j*nd->mat_len]); + strfromf(number_buffer, 32, "%.4f ", nd->synapse_matrix[k+(j*nd->vect_len)]); line_prep = 
strcat(line_prep, number_buffer); if (k < nd->vect_len - 1) { line_prep = strcat(line_prep, ", ");