diff --git a/include/cx.h b/include/cx.h
index 6d2cfdf..cbcdf10 100644
--- a/include/cx.h
+++ b/include/cx.h
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include <GL/glew.h> // Include GLEW
 #include
diff --git a/src/neural.c b/src/neural.c
index 496453b..7b6f0cc 100644
--- a/src/neural.c
+++ b/src/neural.c
@@ -54,13 +54,20 @@ void
 neural_randomize(Neural_Network *self) {
     FILE *f;
     Neural_Layer *nl;
+    uint64_t *rand_vals;
+
     f = fopen("/dev/urandom", "r");
 
     for (int i = 0; i < self->layer_count; i++) {
         nl = self->layers[i];
+        rand_vals = malloc(nl->layer_size_next * sizeof(uint64_t));
+        fread(rand_vals, sizeof(uint64_t),
+              nl->layer_size_next, f);
 
         for (int j = 0; j < nl->layer_size; j++) {
-            fread(nl->neurons[j].synapses, sizeof(float), nl->layer_size_next, f);
+            for (int k = 0; k < nl->layer_size_next; k++) {
+                nl->neurons[j].synapses[k] = (float)rand_vals[k] / (float)UINT64_MAX;
+            }
         }
     }
 }
@@ -137,11 +144,25 @@ neural_process(Neural_Network *self, float *input) {
     return retval;
 }
 
+// These two will be merged into one once I have
+// enough patience to create more dynamic objects.
+static void *
+neural_backprop_up(Neural_Network *self, size_t neuron, size_t layer) {
+    return NULL;
+}
+
+static void *
+neural_backprop_down(Neural_Network *self, size_t neuron, size_t layer) {
+    return NULL;
+}
+
 int
 neural_train(Neural_Network *self, const char *testdata,
              const float *testresult) {
-    // Insert algorithm you lazy fuck.
+    float *testdata_converted;
+    testdata_converted = malloc(self->layers[self->layer_count-1]->layer_size * sizeof(float));
+
     return 0;
 }
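
Note on the neural_randomize change: the new loop reads raw uint64_t values from /dev/urandom and scales each one into a [0, 1] synapse weight (the assignment above was corrected to divide the random value by UINT64_MAX, not the other way around, and to end with a semicolon). The standalone sketch below illustrates that conversion under that assumption; random_unit_floats() and its error handling are illustrative only and are not part of the patch.

/*
 * Standalone sketch, not part of the patch: fill an array with floats in
 * [0, 1] derived from raw uint64_t values read from /dev/urandom.
 * random_unit_floats() is a hypothetical helper used only for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int
random_unit_floats(float *out, size_t count) {
    FILE *f = fopen("/dev/urandom", "r");
    uint64_t *raw;

    if (f == NULL)
        return -1;

    raw = malloc(count * sizeof(uint64_t));
    if (raw == NULL || fread(raw, sizeof(uint64_t), count, f) != count) {
        free(raw);
        fclose(f);
        return -1;
    }

    /* Dividing by UINT64_MAX maps each raw 64-bit value into [0, 1]. */
    for (size_t i = 0; i < count; i++)
        out[i] = (float)raw[i] / (float)UINT64_MAX;

    free(raw);
    fclose(f);
    return 0;
}

int
main(void) {
    float weights[4];

    if (random_unit_floats(weights, 4) == 0)
        for (size_t i = 0; i < 4; i++)
            printf("%f\n", weights[i]);
    return 0;
}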