Improve neural randomization
Start implementing neural training algorithm.
This commit is contained in:
parent
fa0d6291fe
commit
04bf753f09
2 changed files with 24 additions and 2 deletions
|
@ -8,6 +8,7 @@
|
|||
#include <math.h>
|
||||
#include <time.h>
|
||||
#include <unistd.h>
|
||||
#include <stdint.h>
|
||||
|
||||
// Include GLEW
|
||||
#include <GL/glew.h>
|
||||
|
|
25
src/neural.c
25
src/neural.c
|
@ -54,13 +54,20 @@ void
|
|||
neural_randomize(Neural_Network *self) {
    /*
     * Seed every synapse weight in the network with a uniform random
     * value in [0, 1], using /dev/urandom as the entropy source.
     *
     * On failure to open the device, or on allocation / short-read
     * failure, the function returns early and the network is left
     * partially (or not at all) randomized.
     */
    FILE *f;
    Neural_Layer *nl;
    uint64_t *rand_vals;

    f = fopen("/dev/urandom", "r");
    if (f == NULL)
        return;

    for (int i = 0; i < self->layer_count; i++) {
        nl = self->layers[i];
        rand_vals = malloc(nl->layer_size_next * sizeof *rand_vals);
        if (rand_vals == NULL)
            break;

        for (int j = 0; j < nl->layer_size; j++) {
            /* Draw a fresh batch of random words per neuron so rows do
             * not all share the same weights.  (The original drew one
             * batch per layer and also did a redundant fread directly
             * into synapses that was immediately overwritten.) */
            if (fread(rand_vals, sizeof *rand_vals,
                      nl->layer_size_next, f)
                    != (size_t)nl->layer_size_next) {
                free(rand_vals);
                fclose(f);
                return;
            }
            for (int k = 0; k < nl->layer_size_next; k++) {
                /* Map the raw 64-bit value onto [0, 1].  The original
                 * computed UINT64_MAX / rand_vals[k]: integer division,
                 * always >= 1, and a divide-by-zero crash when the raw
                 * value is 0 — the ratio was inverted. */
                nl->neurons[j].synapses[k] =
                    (float)(rand_vals[k] / (double)UINT64_MAX);
            }
        }
        free(rand_vals);   /* was leaked once per layer in the original */
    }
    fclose(f);             /* was never closed in the original */
}
|
||||
|
@ -137,11 +144,25 @@ neural_process(Neural_Network *self, float *input) {
|
|||
return retval;
|
||||
}
|
||||
|
||||
// These two will be merged into one once I have
// enough patience to create more dynamic objects.

/*
 * Placeholder for the upward (toward the input layer) backpropagation
 * step for a single neuron.  Not implemented yet: parameters are
 * currently unused and the function always returns NULL.
 * NOTE(review): the eventual return type/contract is not yet defined —
 * confirm what callers are expected to receive here.
 */
static void *
neural_backprop_up(Neural_Network *self, size_t neuron, size_t layer) {
    return NULL;
}
|
||||
|
||||
/*
 * Placeholder for the downward (toward the output layer) backpropagation
 * step for a single neuron.  Not implemented yet: parameters are
 * currently unused and the function always returns NULL.
 */
static void *
neural_backprop_down(Neural_Network *self, size_t neuron, size_t layer) {
    return NULL;
}
|
||||
|
||||
int
|
||||
neural_train(Neural_Network *self,
|
||||
const char *testdata,
|
||||
const float *testresult) {
|
||||
// Insert algorithm you lazy fuck.
|
||||
float *testdata_converted;
|
||||
testdata_converted = malloc(self->layers[self->layer_count-1]->layer_size * sizeof(float));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue