Implement XML export
This allows the neural network to be saved once it has been trained.
parent ef18b57d61
commit 264fcb407b
7 changed files with 192 additions and 33 deletions
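For context, a minimal sketch of how a caller could persist the string returned by the new neural_getXML() once it has it in hand. The file writing and the path are illustrative additions, not part of this commit; the commit itself only builds the string and frees it in cx_run:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical helper: write an already-exported XML string (as returned
     * by neural_getXML) to disk and release it. */
    static int save_xml(char *xml, const char *path) {
        FILE *f = fopen(path, "w");

        if (f == NULL) {
            free(xml);
            return -1;
        }
        fputs(xml, f);
        fclose(f);
        free(xml);      /* the caller owns the buffer, as cx_run shows below */
        return 0;
    }

    int main(void) {
        /* Stand-in for neural_getXML(nn); in the real code the string arrives
         * in cx_run through pthread_join. */
        char *xml = malloc(64);

        strcpy(xml, "<?xml version=\"1.0\"?>\n\n<Network>\n</Network>\n");
        return save_xml(xml, "network.xml") ? 1 : 0;
    }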
@@ -1,6 +1,8 @@
 #ifndef CX_H
 #define CX_H
 
+#define __STDC_WANT_IEC_60559_BFP_EXT__
+
 // Include standard headers
 #include <stdio.h>
 #include <stdlib.h>
@@ -10,6 +12,8 @@
 #include <unistd.h>
 #include <stdint.h>
 #include <pthread.h>
+#include <inttypes.h>
+#include <string.h>
 
 // Include GLEW
 #include <GL/glew.h>

@@ -22,7 +22,7 @@ int modelRegistry_register(ModelRegistry *, Model *);
 void modelRegistry_free(ModelRegistry *);
 GLfloat * model_applyTransformations(Model *);
 void model_colorFromPosition(Model *);
-void model_colorXYZ(Model *, int R, int G, int B);
+void model_colorXYZ(Model *, float R, float G, float B);
 void model_colorRed(Model *);
 void model_colorGreen(Model *);
 void model_colorBlue(Model *);

@@ -32,6 +32,7 @@ float *neural_loadData(Neural_Network *, const char *);
 float *neural_process(Neural_Network *, float *);
 Neural_Data *neural_getData(Neural_Network *, size_t);
 int neural_getMesh(Neural_Network *, ModelRegistry *);
+char *neural_getXML(Neural_Network *);
 
 #endif
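The new __STDC_WANT_IEC_60559_BFP_EXT__ definition, placed before the standard headers, asks glibc to expose the ISO/IEC TS 18661-1 additions in <stdlib.h>, notably strfromf(), which the new neural_getXML() below uses to format float values. A standalone sketch of that call (buffer size and value are arbitrary):

    #define __STDC_WANT_IEC_60559_BFP_EXT__   /* must come before the includes */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        char buf[32];

        /* strfromf formats a float much like snprintf, but takes the value directly. */
        strfromf(buf, sizeof buf, "%.2f", 0.5f);
        puts(buf);                             /* prints 0.50 */
        return 0;
    }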
src/cx.c (15 changed lines)
@@ -279,26 +279,22 @@ cx_nnthread(void *self) {
     CX_Thread *self_t = self;
     CX_NN_CTX *nn_ctx = self_t->ctx;
     float *input, *output;
+    char *export;
 
     cx_nninit(&nn_ctx->nn);
     input = neural_loadData(nn_ctx->nn, "../training_data/0");
 
-    for (int i = 0; i < 64; i++) {
-        nn_ctx->nn->layers[0]->neurons[i].value = input[i];
-    }
-
     output = neural_process(nn_ctx->nn, input);
 
-    for (int i = 0; i < 4; i++) {
-        nn_ctx->nn->layers[7]->neurons[i].value = output[i];
-    }
+    export = neural_getXML(nn_ctx->nn);
 
-    return NULL;
+    return export;
 }
 
 int
 cx_run(CX_Context *ctx) {
     CX_ThreadGroup *tg[2];
+    void *neural_xml;
 
     // Establish a model registry
     ctx->gl_ctx->mr = modelRegistry_new();
@@ -308,7 +304,7 @@ cx_run(CX_Context *ctx) {
 
     tg[1] = cx_threadGroup_new(&cx_nnthread, ctx->nn_ctx);
 
-    pthread_join(tg[1]->group_manager->thread, NULL);
+    pthread_join(tg[1]->group_manager->thread, &neural_xml);
 
     ctx->gl_ctx->master_lock = 0;
 
@@ -323,6 +319,7 @@ cx_run(CX_Context *ctx) {
 
     free(ctx->threads);
     free(ctx);
+    free(neural_xml);
 
     return 0;
 }
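On the pthread_join change above: a thread's return value is delivered through pthread_join's second argument, which is how the XML string produced in cx_nnthread reaches cx_run and can be freed there. A minimal standalone sketch of the mechanism (names here are illustrative, not from the repository):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* The worker returns a heap-allocated buffer; the joining thread picks it
     * up and becomes responsible for freeing it. */
    static void *worker(void *arg) {
        char *msg = malloc(32);

        (void)arg;
        strcpy(msg, "<Network/>\n");
        return msg;                  /* ends up in *retval of pthread_join */
    }

    int main(void) {
        pthread_t t;
        void *result;

        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, &result);    /* result now points at the worker's buffer */
        fputs(result, stdout);
        free(result);
        return 0;
    }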
@@ -4,7 +4,6 @@ int
 main(void) {
     // CX context (Window, neural network, threads.)
     CX_Context *cx_ctx;
-
     int retval;
 
     if (cx_init(&cx_ctx)) {
@@ -129,7 +129,7 @@ model_colorFromPosition(Model *self) {
     }
 }
 
-void model_colorXYZ(Model *self, int R, int G, int B) {
+void model_colorXYZ(Model *self, float R, float G, float B) {
     for (int i = 0; i < self->bufsize; i++) {
         for (int j = 0; j < 4; j++) {
             switch(j) {
src/neural.c (200 changed lines)
@@ -73,7 +73,6 @@ neural_randomize(Neural_Network *self) {
     Neural_Layer *nl;
     uint64_t *rand_vals;
 
-
     f = fopen("/dev/urandom", "r");
 
     for (int i = 0; i < self->layer_count; i++) {
@@ -83,7 +82,7 @@ neural_randomize(Neural_Network *self) {
         fread(rand_vals, sizeof(uint64_t),
               nl->layer_size_next, f);
         for (int k = 0; k < nl->layer_size_next; k++) {
-            nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX;
+            nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX / nl->layer_size;
         }
         free(rand_vals);
     }
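The changed line above additionally divides each random weight by the layer size, so a neuron that sums layer_size inputs in the 0..1 range stays roughly within 0..1 itself. A standalone sketch of the arithmetic (the sample value and the layer size of 64 are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t raw = 0x8000000000000000ull;  /* pretend /dev/urandom sample */
        int layer_size = 64;

        /* Map the raw 64-bit sample to [0, 1], then scale by the layer size. */
        float weight = (float)raw / UINT64_MAX / layer_size;
        printf("%f\n", weight);                /* about 0.0078, i.e. 0.5 / 64 */
        return 0;
    }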
@@ -135,8 +134,8 @@ neural_process(Neural_Network *self, float *input) {
     for (int i = 0; i < self->layers[0]->layer_size; i++) {
         nl->neurons[i].value = input[i];
     }
-    neural_vector = tensor_new(1, nl->layer_size, 0);
     for (int i = 0; i < self->layer_count; i++) {
+        neural_vector = tensor_new(nl->layer_size, 1, 0);
         nl = self->layers[i];
         synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size, 0);
         for (int j = 0; j < nl->layer_size; j++) {
@@ -147,9 +146,16 @@ neural_process(Neural_Network *self, float *input) {
         }
 
         temp_buffer = tensor_multip(synapse_matrix, neural_vector);
+        neural_vector = temp_buffer;
+        if (nl->layer_size_next) {
+            Neural_Layer *nl_next = self->layers[i+1];
+            for (int j = 0; j < nl_next->layer_size; j++) {
+                nl_next->neurons[j].value = neural_vector->data[j];
+            }
+
+        }
         tensor_free(neural_vector);
         tensor_free(synapse_matrix);
-        neural_vector = temp_buffer;
     }
 
     retval = malloc(nl->layer_size * sizeof(float));
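In the reworked loop above, tensor_multip appears to compute the next layer's activations as the synapse matrix (layer_size_next rows, layer_size columns) times the current layer's vector. A plain-array sketch of that product, standing in for the repository's Tensor type (sizes and values are illustrative):

    #include <stdio.h>

    /* out = m * v, with m stored row-major as rows x cols. */
    static void matvec(const float *m, const float *v, float *out,
                       int rows, int cols) {
        for (int r = 0; r < rows; r++) {
            out[r] = 0.0f;
            for (int c = 0; c < cols; c++)
                out[r] += m[r * cols + c] * v[c];
        }
    }

    int main(void) {
        const float synapse_matrix[2 * 3] = { 1, 0, 1,
                                              0, 1, 0 };      /* 2 x 3 */
        const float neural_vector[3] = { 0.5f, 0.25f, 0.25f };
        float next_layer[2];

        matvec(synapse_matrix, neural_vector, next_layer, 2, 3);
        printf("%.2f %.2f\n", next_layer[0], next_layer[1]);  /* 0.75 0.25 */
        return 0;
    }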
@@ -160,48 +166,81 @@ neural_process(Neural_Network *self, float *input) {
     return retval;
 }
 
-// These two will be merged into one once I have
-// enough patience to create more dynamic objects.
 static void *
-neural_backprop_up(Neural_Network *self, size_t neuron, size_t layer) {
-    return NULL;
-}
+neural_backpropagation(Neural_Network *self, int neuron, int layer, float ratio) {
+    Neural_Layer *nl;
+    Neural_Data *nd;
+    float *ratios;
+    int *neurons;
+    float *synapses;
+
+
+    for (int i = layer-1; i >= 0; i--) {
+        nl = self->layers[i];
+        for (int j = 0; j < nl->layer_size; j++) {
+            synapses = nl->neurons[j].synapses;
+            for (int k = 0; k < nl->layer_size_next; i++) {
+                synapses[k] = 0;
+            }
+        }
+
+    }
 
-static void *
-neural_backprop_down(Neural_Network *self, size_t neuron, size_t layer) {
     return NULL;
 }
 
 int
 neural_train(Neural_Network *self,
+             const char *input_path,
              const float *expected_result) {
-    Neural_Data *input_data; // What the neural network received
     Neural_Data *result_data; // What the neural network computed
+    float backprop_ratio;
 
-    input_data = neural_getData(self, 0);
-    result_data = neural_getData(self, self->layer_count-1);
+    for (int i = self->layer_count-1; i >= 0; i--) {
+        Neural_Layer *nl = self->layers[i];
+        result_data = neural_getData(self, i);
+
+        for (int j = nl->layer_size-1; j >= 0; j--) {
+            backprop_ratio = nl->neurons[i].value / expected_result[i];
+            neural_backpropagation(self, j, i, backprop_ratio);
+        }
+    }
 
     return 0;
 }
 
+Neural_Data *
+neural_data_new(int layer_size, int layer_size_next) {
+    Neural_Data *self;
+
+    self = calloc(1, sizeof(Neural_Data));
+    self->neural_vector = malloc(layer_size * sizeof(float));
+    self->vect_len = layer_size;
+
+    if (layer_size_next) {
+        self->synapse_matrix = malloc(layer_size * layer_size_next
+                                      * sizeof(float));
+        self->mat_len = layer_size_next;
+    }
+    return self;
+}
+
 Neural_Data *
 neural_getData(Neural_Network *self, size_t layer) {
     Neural_Layer *nl;
     Neural_Data *retval;
 
-    retval = malloc(1 * sizeof(Neural_Data));
-
     nl = self->layers[layer];
 
-    retval->neural_vector = malloc(nl->layer_size * sizeof(float));
+    retval = neural_data_new(nl->layer_size, nl->layer_size_next);
 
     retval->vect_len = nl->layer_size;
     if (!nl->layer_size_next) {
        retval->synapse_matrix = NULL;
        retval->mat_len = 0;
     }
     else {
-        retval->synapse_matrix = malloc(nl->layer_size * nl->layer_size_next
-                                        * sizeof(float));
         for (int i = 0; i < nl->layer_size; i++) {
             for (int j = 0; j < nl->layer_size_next; j++) {
                 retval->synapse_matrix[i*j+i] = nl->neurons[i].synapses[j];
@@ -222,7 +261,7 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
     for (int j = 0; j < nn->layer_count; j++) {
         Neural_Layer *nl = nn->layers[j];
         for (int i = 0; i < nl->layer_size; i++) {
-            unsigned int brightness;
+            float brightness;
             for (int k = 0; k < nl->layer_size_next; k++) {
                 model = model_line((-.90)
                     + ((GLfloat)2 * i * .90/(nl->layer_size-1)),
@@ -236,7 +275,7 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
 
                     .001 // girth
                 );
-                brightness = nl->neurons[i].synapses[k] * 255;
+                brightness = nl->neurons[i].synapses[k];
                 if (brightness) {
                     model_colorXYZ(model, brightness, 0, 0);
                 }
@@ -245,7 +284,7 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
 
             model = model_circle(0, (GLfloat)1/64);
             brightness = nl->neurons[i].value <= 1.0 ?
-                nl->neurons[i].value : 255;
+                nl->neurons[i].value : 1.0;
             model_colorXYZ(model, 0, brightness, 0);
             Tensor *translation_matrix = tensor_new(4, 4, 1);
             Tensor *aspectRatio_matrix = tensor_new(4, 4, 1);
@@ -270,3 +309,122 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
     return 0;
 }
 
+static char*
+indented_line(char *str, const char *line, int *indent) {
+    for (int m = 0; m < *indent; m++)
+        str = strcat(str, " ");
+    str = strcat(str, line);
+
+    return str;
+
+
+}
+
+static char*
+indented_tag(char *str, const char *tag, int *indent) {
+    if (tag[1] == '/') {
+        *indent -= 4;
+    }
+
+    indented_line(str, tag, indent);
+
+    if (tag[1] != '/') {
+        *indent += 4;
+    }
+
+    return str;
+}
+
+// TODO
+/* This XML implementation has potential bugs and has not
+ * been checked very thoroughly, fix, please.
+ */
+char *
+neural_getXML(Neural_Network *nn) {
+    char *retval;
+    const char *to_write;
+    int volume = 0;
+    int indent = 0;
+
+    retval = malloc(0xff * sizeof(char));
+
+    to_write = "<?xml version=\"1.0\"?>\n\n";
+    retval = strcpy(retval, to_write);
+    to_write = "<Network>\n";
+    retval = indented_tag(retval, to_write, &indent);
+
+    for (int i = 0; i < nn->layer_count; i++) {
+        Neural_Layer *nl;
+        Neural_Data *nd;
+        char *line_prep;
+
+        nl = nn->layers[i];
+        nd = neural_getData(nn, i);
+
+        retval = realloc(retval, strlen(retval)
+                         + (nl->layer_size * 32 * nl->layer_size_next)// Matrix
+                         + (nl->layer_size * 32) // Vector
+                         + 0x3ff * nl->layer_size // Expected tag garbage.
+                         + indent); // Space waster
+
+        to_write = "<Layer>\n";
+        retval = indented_tag(retval, to_write, &indent);
+
+        to_write = "<Synapse_Matrix>\n";
+        retval = indented_tag(retval, to_write, &indent);
+        for (int j = 0; j < nd->mat_len; j++) {
+            char number_buffer[32];
+            line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
+                               + (nl->layer_size * 32));
+            *line_prep = '\0';
+
+            line_prep = strcat(line_prep, "[ ");
+            for (int k = 0; k < nd->vect_len; k++) {
+
+                strfromf(number_buffer, 32, "%.2f ", nd->synapse_matrix[k+j*nd->mat_len]);
+                line_prep = strcat(line_prep, number_buffer);
+                if (k < nd->vect_len - 1) {
+                    line_prep = strcat(line_prep, ", ");
+                }
+
+            }
+            line_prep = strcat(line_prep, " ]\n");
+            retval = indented_line(retval, line_prep, &indent);
+            free(line_prep);
+        }
+        to_write = "</Synapse_Matrix>\n";
+        retval = indented_tag(retval, to_write, &indent);
+
+
+        to_write = "<Neural_Vector>\n";
+        retval = indented_tag(retval, to_write, &indent);
+        char number_buffer[32];
+        line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
+                           + (nl->layer_size * 32));
+        *line_prep = '\0';
+        line_prep = strcat(line_prep, "[ ");
+
+        for (int k = 0; k < nd->vect_len; k++) {
+            strfromf(number_buffer, 32, "%.4f", nd->neural_vector[k]);
+            line_prep = strcat(line_prep, number_buffer);
+
+            if (k < nd->vect_len - 1) {
+                line_prep = strcat(line_prep, ", ");
+            }
+
+        }
+        line_prep = strcat(line_prep, " ]\n");
+        retval = indented_line(retval, line_prep, &indent);
+        free(line_prep);
+        to_write = "</Neural_Vector>\n";
+        retval = indented_tag(retval, to_write, &indent);
+
+        to_write = "</Layer>\n";
+        retval = indented_tag(retval, to_write, &indent);
+    }
+    to_write = "</Network>\n";
+    retval = indented_tag(retval, to_write, &indent);
+
+    return retval;
+}
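Putting the tag-writing calls above together, the exported document should come out roughly as follows (layer count and values are illustrative; matrix rows use the "%.2f " format, the neural vector "%.4f", and indentation grows by four spaces per nesting level via indented_tag):

    <?xml version="1.0"?>

    <Network>
        <Layer>
            <Synapse_Matrix>
                [ 0.01 , 0.25 , 0.50 ]
                [ 0.12 , 0.03 , 0.40 ]
            </Synapse_Matrix>
            <Neural_Vector>
                [ 0.5000, 0.2500, 0.7500 ]
            </Neural_Vector>
        </Layer>
    </Network>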