diff --git a/src/neural.c b/src/neural.c
index 7b6f0cc..c68391c 100644
--- a/src/neural.c
+++ b/src/neural.c
@@ -66,7 +66,7 @@ neural_randomize(Neural_Network *self) {
                 nl->layer_size_next, f);
         for (int j = 0; j < nl->layer_size; j++) {
             for (int k = 0; k < nl->layer_size_next; k++) {
-                nl->neurons[j].synapses[k] = UINT64_MAX / rand_vals[k]
+                nl->neurons[j].synapses[k] = UINT64_MAX / rand_vals[k];
             }
         }
     }
@@ -74,7 +74,6 @@ neural_randomize(Neural_Network *self) {
 
 float *
 neural_loadData(Neural_Network *self, const char *filename) {
-    Neural_Layer *nl;
     FILE *f;
     char *file_data;
     float *retval;
@@ -87,8 +86,6 @@ neural_loadData(Neural_Network *self, const char *filename) {
 
     f = fopen(filename, "r");
 
-    nl = self->layers[0];
-
     fread(file_data, sizeof(char), 9*8, f); // 9*8 - 8*8 value matrix + newlines
     for (int i = 0; i < 8*8; i++) {
         if (file_data[read_cursor] == '\n') {
@@ -158,12 +155,16 @@ neural_backprop_down(Neural_Network *self, size_t neuron, size_t layer) {
 
 int
 neural_train(Neural_Network *self,
-        const char *testdata,
+        const float *testdata,
         const float *testresult) {
-    float *testdata_converted;
-    testdata_converted = malloc(self->layers[self->layer_count-1]->layer_size * sizeof(float));
+    float *retval;
+    int k, i;
 
-    return 0;
+    retval = malloc(self->layers[self->layer_count-1]->layer_size * sizeof(float));
+    k = 0;
+    i = 0;
+
+    return retval;
 }
 
 int