From 264fcb407ba1430a0eb112b13d708c6f5fda98a9 Mon Sep 17 00:00:00 2001
From: Marcel Plch
Date: Wed, 4 Dec 2024 16:01:50 +0100
Subject: [PATCH] Implement XML export

This allows saving the neural network once it has been trained.
---
 include/cx.h     |   4 +
 include/model.h  |   2 +-
 include/neural.h |   1 +
 src/cx.c         |  15 ++--
 src/main.c       |   1 -
 src/model.c      |   2 +-
 src/neural.c     | 200 ++++++++++++++++++++++++++++++++++++++++++-----
 7 files changed, 192 insertions(+), 33 deletions(-)

diff --git a/include/cx.h b/include/cx.h
index 29a2cf2..408bc78 100644
--- a/include/cx.h
+++ b/include/cx.h
@@ -1,6 +1,8 @@
 #ifndef CX_H
 #define CX_H
 
+#define __STDC_WANT_IEC_60559_BFP_EXT__ /* exposes strfromf() */
+
 // Include standard headers
 #include 
 #include 
@@ -10,6 +12,8 @@
 #include 
 #include 
 #include 
+#include 
+#include 
 
 // Include GLEW
 #include 
diff --git a/include/model.h b/include/model.h
index 03d1468..64d0fcb 100644
--- a/include/model.h
+++ b/include/model.h
@@ -22,7 +22,7 @@ int modelRegistry_register(ModelRegistry *, Model *);
 void modelRegistry_free(ModelRegistry *);
 GLfloat * model_applyTransformations(Model *);
 void model_colorFromPosition(Model *);
-void model_colorXYZ(Model *, int R, int G, int B);
+void model_colorXYZ(Model *, float R, float G, float B);
 void model_colorRed(Model *);
 void model_colorGreen(Model *);
 void model_colorBlue(Model *);
diff --git a/include/neural.h b/include/neural.h
index eb1bf3d..d2aafd4 100644
--- a/include/neural.h
+++ b/include/neural.h
@@ -32,6 +32,7 @@ float *neural_loadData(Neural_Network *, const char *);
 float *neural_process(Neural_Network *, float *);
 Neural_Data *neural_getData(Neural_Network *, size_t);
 int neural_getMesh(Neural_Network *, ModelRegistry *);
+char *neural_getXML(Neural_Network *);
 
 #endif
 
diff --git a/src/cx.c b/src/cx.c
index 2e200a9..493e932 100644
--- a/src/cx.c
+++ b/src/cx.c
@@ -279,26 +279,22 @@ cx_nnthread(void *self) {
     CX_Thread *self_t = self;
     CX_NN_CTX *nn_ctx = self_t->ctx;
     float *input, *output;
+    char *export;
 
     cx_nninit(&nn_ctx->nn);
 
     input = neural_loadData(nn_ctx->nn, "../training_data/0");
 
-    for (int i = 0; i < 64; i++) {
-        nn_ctx->nn->layers[0]->neurons[i].value = input[i];
-    }
-
     output = neural_process(nn_ctx->nn, input);
 
-    for (int i = 0; i < 4; i++) {
-        nn_ctx->nn->layers[7]->neurons[i].value = output[i];
-    }
+    export = neural_getXML(nn_ctx->nn);
 
-    return NULL;
+    return export;
 }
 
 int
 cx_run(CX_Context *ctx) {
     CX_ThreadGroup *tg[2];
+    void *neural_xml;
 
     // Establish a model registry
     ctx->gl_ctx->mr = modelRegistry_new();
@@ -308,7 +304,7 @@ cx_run(CX_Context *ctx) {
 
     tg[1] = cx_threadGroup_new(&cx_nnthread, ctx->nn_ctx);
 
-    pthread_join(tg[1]->group_manager->thread, NULL);
+    pthread_join(tg[1]->group_manager->thread, &neural_xml);
 
     ctx->gl_ctx->master_lock = 0;
 
@@ -323,6 +319,7 @@ cx_run(CX_Context *ctx) {
 
     free(ctx->threads);
     free(ctx);
+    free(neural_xml);
 
     return 0;
 }
diff --git a/src/main.c b/src/main.c
index 9d5f21e..a49b46f 100644
--- a/src/main.c
+++ b/src/main.c
@@ -4,7 +4,6 @@ int
 main(void) {
     // CX context (Window, neural network, threads.)
     CX_Context *cx_ctx;
-    int retval;
 
     if (cx_init(&cx_ctx)) {
diff --git a/src/model.c b/src/model.c
index 78b13f5..3f4d885 100644
--- a/src/model.c
+++ b/src/model.c
@@ -129,7 +129,7 @@ model_colorFromPosition(Model *self) {
     }
 }
 
-void model_colorXYZ(Model *self, int R, int G, int B) {
+void model_colorXYZ(Model *self, float R, float G, float B) {
     for (int i = 0; i < self->bufsize; i++) {
         for (int j = 0; j < 4; j++) {
             switch(j) {
diff --git a/src/neural.c b/src/neural.c
index 1fc6816..32ec5f1 100644
--- a/src/neural.c
+++ b/src/neural.c
@@ -73,7 +73,6 @@ neural_randomize(Neural_Network *self) {
     Neural_Layer *nl;
     uint64_t *rand_vals;
 
-
     f = fopen("/dev/urandom", "r");
 
     for (int i = 0; i < self->layer_count; i++) {
@@ -83,7 +82,7 @@ neural_randomize(Neural_Network *self) {
             fread(rand_vals, sizeof(uint64_t), nl->layer_size_next, f);
             for (int k = 0; k < nl->layer_size_next; k++) {
-                nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX;
+                nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX / nl->layer_size;
             }
             free(rand_vals);
         }
@@ -135,8 +134,8 @@ neural_process(Neural_Network *self, float *input) {
     for (int i = 0; i < self->layers[0]->layer_size; i++) {
         nl->neurons[i].value = input[i];
     }
-    neural_vector = tensor_new(1, nl->layer_size, 0);
     for (int i = 0; i < self->layer_count; i++) {
         nl = self->layers[i];
+        neural_vector = tensor_new(nl->layer_size, 1, 0);
         synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size, 0);
         for (int j = 0; j < nl->layer_size; j++) {
@@ -147,9 +146,16 @@
         }
 
         temp_buffer = tensor_multip(synapse_matrix, neural_vector);
+        tensor_free(neural_vector);
+        neural_vector = temp_buffer;
+        if (nl->layer_size_next) {
+            Neural_Layer *nl_next = self->layers[i+1];
+            for (int j = 0; j < nl_next->layer_size; j++) {
+                nl_next->neurons[j].value = neural_vector->data[j];
+            }
+        }
         tensor_free(neural_vector);
         tensor_free(synapse_matrix);
-        neural_vector = temp_buffer;
     }
 
     retval = malloc(nl->layer_size * sizeof(float));
@@ -160,48 +166,81 @@ neural_process(Neural_Network *self, float *input) {
     return retval;
 }
 
-// These two will be merged into one once I have
-// enough patience to create more dynamic objects.
 static void *
-neural_backprop_up(Neural_Network *self, size_t neuron, size_t layer) {
-    return NULL;
-}
+neural_backpropagation(Neural_Network *self, int neuron, int layer, float ratio) {
+    Neural_Layer *nl;
+    float *synapses;
+
+    // Placeholder: zero every synapse below the given layer.
+    for (int i = layer-1; i >= 0; i--) {
+        nl = self->layers[i];
+        for (int j = 0; j < nl->layer_size; j++) {
+            synapses = nl->neurons[j].synapses;
+            for (int k = 0; k < nl->layer_size_next; k++) {
+                synapses[k] = 0;
+            }
+        }
+    }
 
-static void *
-neural_backprop_down(Neural_Network *self, size_t neuron, size_t layer) {
     return NULL;
 }
 
 int
 neural_train(Neural_Network *self,
+             const char *input_path,
              const float *expected_result) {
-    Neural_Data *input_data; // What the neural network received
     Neural_Data *result_data; // What the neural network computed
+    float backprop_ratio;
 
-    input_data = neural_getData(self, 0);
-    result_data = neural_getData(self, self->layer_count-1);
+    for (int i = self->layer_count-1; i >= 0; i--) {
+        Neural_Layer *nl = self->layers[i];
+        result_data = neural_getData(self, i);
+
+        for (int j = nl->layer_size-1; j >= 0; j--) {
+            backprop_ratio = nl->neurons[j].value / expected_result[j];
+            neural_backpropagation(self, j, i, backprop_ratio);
+        }
+    }
 
     return 0;
 }
 
+Neural_Data *
+neural_data_new(int layer_size, int layer_size_next) {
+    Neural_Data *self;
+
+    self = calloc(1, sizeof(Neural_Data));
+    self->neural_vector = malloc(layer_size * sizeof(float));
+    self->vect_len = layer_size;
+
+    if (layer_size_next) {
+        self->synapse_matrix = malloc(layer_size * layer_size_next
+                                      * sizeof(float));
+        self->mat_len = layer_size_next;
+    }
+    return self;
+}
+
 Neural_Data *
 neural_getData(Neural_Network *self, size_t layer) {
     Neural_Layer *nl;
     Neural_Data *retval;
 
-    retval = malloc(1 * sizeof(Neural_Data));
     nl = self->layers[layer];
-    retval->neural_vector = malloc(nl->layer_size * sizeof(float));
+    retval = neural_data_new(nl->layer_size, nl->layer_size_next);
+    retval->vect_len = nl->layer_size;
 
     if (!nl->layer_size_next) {
         retval->synapse_matrix = NULL;
         retval->mat_len = 0;
     } else {
-        retval->synapse_matrix = malloc(nl->layer_size * nl->layer_size_next
-                                        * sizeof(float));
         for (int i = 0; i < nl->layer_size; i++) {
             for (int j = 0; j < nl->layer_size_next; j++) {
                 retval->synapse_matrix[i*j+i] = nl->neurons[i].synapses[j];
@@ -222,7 +261,7 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
     for (int j = 0; j < nn->layer_count; j++) {
         Neural_Layer *nl = nn->layers[j];
         for (int i = 0; i < nl->layer_size; i++) {
-            unsigned int brightness;
+            float brightness;
             for (int k = 0; k < nl->layer_size_next; k++) {
                 model = model_line((-.90) + ((GLfloat)2 * i * .90/(nl->layer_size-1)),
@@ -236,7 +275,7 @@
                     .001 // girth
                 );
 
-                brightness = nl->neurons[i].synapses[k] * 255;
+                brightness = nl->neurons[i].synapses[k];
                 if (brightness) {
                     model_colorXYZ(model, brightness, 0, 0);
                 }
@@ -245,7 +284,7 @@
 
             model = model_circle(0, (GLfloat)1/64);
             brightness = nl->neurons[i].value <= 1.0 ?
-                nl->neurons[i].value : 255;
+                nl->neurons[i].value : 1.0;
             model_colorXYZ(model, 0, brightness, 0);
             Tensor *translation_matrix = tensor_new(4, 4, 1);
             Tensor *aspectRatio_matrix = tensor_new(4, 4, 1);
@@ -270,3 +309,122 @@
 
     return 0;
 }
+
+static char*
+indented_line(char *str, const char *line, int *indent) {
+    for (int m = 0; m < *indent; m++)
+        str = strcat(str, " ");
+    str = strcat(str, line);
+
+    return str;
+}
+
+static char*
+indented_tag(char *str, const char *tag, int *indent) {
+    // A closing tag ("</...>") dedents before it is written,
+    // an opening tag indents after it is written.
+    if (tag[1] == '/') {
+        *indent -= 4;
+    }
+
+    indented_line(str, tag, indent);
+
+    if (tag[1] != '/') {
+        *indent += 4;
+    }
+
+    return str;
+}
+
+// TODO
+/* This XML implementation has potential bugs and has not
+ * been checked very thoroughly. Fix, please.
+ */
+char *
+neural_getXML(Neural_Network *nn) {
+    char *retval;
+    const char *to_write;
+    int indent = 0;
+
+    retval = malloc(0xff * sizeof(char));
+
+    to_write = "<?xml version=\"1.0\"?>\n\n";
+    retval = strcpy(retval, to_write);
+    to_write = "<NeuralNetwork>\n";
+    retval = indented_tag(retval, to_write, &indent);
+
+    for (int i = 0; i < nn->layer_count; i++) {
+        Neural_Layer *nl;
+        Neural_Data *nd;
+        char *line_prep;
+
+        nl = nn->layers[i];
+        nd = neural_getData(nn, i);
+
+        retval = realloc(retval, strlen(retval)
+                         + (nl->layer_size * 32 * nl->layer_size_next) // Matrix
+                         + (nl->layer_size * 32) // Vector
+                         + 0x3ff * nl->layer_size // Expected tag garbage.
+                         + indent); // Space waster
+
+        to_write = "<Layer>\n";
+        retval = indented_tag(retval, to_write, &indent);
+
+        to_write = "<SynapseMatrix>\n";
+        retval = indented_tag(retval, to_write, &indent);
+        for (int j = 0; j < nd->mat_len; j++) {
+            char number_buffer[32];
+            line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
+                               + (nl->layer_size * 32));
+            *line_prep = '\0';
+
+            line_prep = strcat(line_prep, "[ ");
+            for (int k = 0; k < nd->vect_len; k++) {
+                strfromf(number_buffer, 32, "%.2f", nd->synapse_matrix[k+j*nd->mat_len]);
+                line_prep = strcat(line_prep, number_buffer);
+                if (k < nd->vect_len - 1) {
+                    line_prep = strcat(line_prep, ", ");
+                }
+            }
+            line_prep = strcat(line_prep, " ]\n");
+            retval = indented_line(retval, line_prep, &indent);
+            free(line_prep);
+        }
+        to_write = "</SynapseMatrix>\n";
+        retval = indented_tag(retval, to_write, &indent);
+
+        to_write = "<NeuronVector>\n";
+        retval = indented_tag(retval, to_write, &indent);
+        char number_buffer[32];
+        line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
+                           + (nl->layer_size * 32));
+        *line_prep = '\0';
+        line_prep = strcat(line_prep, "[ ");
+
+        for (int k = 0; k < nd->vect_len; k++) {
+            strfromf(number_buffer, 32, "%.4f", nd->neural_vector[k]);
+            line_prep = strcat(line_prep, number_buffer);
+
+            if (k < nd->vect_len - 1) {
+                line_prep = strcat(line_prep, ", ");
+            }
+        }
+        line_prep = strcat(line_prep, " ]\n");
+        retval = indented_line(retval, line_prep, &indent);
+        free(line_prep);
+        to_write = "</NeuronVector>\n";
+        retval = indented_tag(retval, to_write, &indent);
+
+        to_write = "</Layer>\n";
+        retval = indented_tag(retval, to_write, &indent);
+
+        // neural_getData() returns a fresh snapshot; release it.
+        free(nd->neural_vector);
+        free(nd->synapse_matrix);
+        free(nd);
+    }
+    to_write = "</NeuralNetwork>\n";
+    retval = indented_tag(retval, to_write, &indent);
+
+    return retval;
+}
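-- 

Usage sketch, not part of the patch: cx_nnthread() already hands the XML
string back through pthread_join(), but neural_getXML() can also be called
directly. A minimal sketch, assuming only what include/neural.h declares;
the save_network_xml() helper and the output path are hypothetical:

    #include <stdio.h>
    #include <stdlib.h>
    #include "neural.h"

    // Hypothetical helper: persist the exported XML to a file.
    static int
    save_network_xml(Neural_Network *nn, const char *path) {
        char *xml = neural_getXML(nn); // heap-allocated; caller must free()
        FILE *f = fopen(path, "w");

        if (!f) {
            free(xml);
            return -1;
        }
        fputs(xml, f);
        fclose(f);
        free(xml);
        return 0;
    }

With the tag names used in neural_getXML(), the exported document has
roughly this shape (a two-neuron layer feeding three neurons; all values
illustrative):

    <?xml version="1.0"?>

    <NeuralNetwork>
        <Layer>
            <SynapseMatrix>
                [ 0.03, 0.01 ]
                [ 0.02, 0.05 ]
                [ 0.04, 0.02 ]
            </SynapseMatrix>
            <NeuronVector>
                [ 0.5000, 0.2500 ]
            </NeuronVector>
        </Layer>
        ...
    </NeuralNetwork>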