Add a basic neural interface.
parent 29251ba60c
commit 87a03582cd

4 changed files with 100 additions and 0 deletions

@@ -36,6 +36,7 @@ add_executable(
     src/tensor.c
     src/model.c
     src/shader.c
+    src/neural.c
 )

 target_link_libraries(

@@ -21,11 +21,15 @@
 #include <model.h>
 #include <tensor.h>
 #include <shader.h>
+#include <neural.h>

 // Declare functions

 int cx_glinit(GLFWwindow **);
 int cx_glrun(GLFWwindow *);

+int cx_nninit(Neural_Network **);
+int cx_nnrun(Neural_Network *);
+
 #endif
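
The two cx_nn* entry points above are only declared by this commit; none of the four changed files defines them. As a rough illustration of how they might eventually wrap the neural_* API introduced in include/neural.h below, here is a minimal sketch in C. The layer dimensions (16 neurons per layer, 4 layers) and the zeroed input vector are placeholders, not anything this commit specifies.

    /* Hypothetical wrappers, not part of this commit. */
    #include <stdlib.h>
    #include <neural.h>

    int
    cx_nninit(Neural_Network **nn) {
        *nn = neural_new(16, 4);    /* placeholder size: 16 neurons x 4 layers */
        if (*nn == NULL)
            return -1;
        neural_randomize(*nn);
        return 0;
    }

    int
    cx_nnrun(Neural_Network *nn) {
        float input[16] = {0};      /* placeholder input */
        float *output = neural_process(nn, input);
        if (output == NULL)
            return -1;
        free(output);               /* neural_process returns a heap buffer */
        return 0;
    }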

include/neural.h (new file, 24 lines added)
@@ -0,0 +1,24 @@
+#ifndef NEURAL_H
+#define NEURAL_H
+
+#include <sys/types.h> /* ssize_t */
+
+typedef struct _neuron {
+    float value;
+    float threshold;
+    float **in_values;
+    float *weights;
+    ssize_t in_values_size;
+} Neuron;
+
+typedef struct _neural_network {
+    Neuron *n;
+    ssize_t layer_size; // Neurons Per Layer
+    ssize_t layers;
+} Neural_Network;
+
+Neural_Network *neural_new(size_t, size_t);
+void neural_randomize(Neural_Network *);
+float *neural_process(Neural_Network *, float *);
+
+#endif
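
One thing worth noting about this header: the whole network is a single flat array of Neuron, layer_size * layers entries long, so neuron i of layer j lives at n[j*layer_size + i]. That is the indexing scheme src/neural.c below relies on. A small sketch of walking the structure (neural_dump is a hypothetical helper, not part of this commit):

    #include <stdio.h>
    #include <neural.h>

    /* Hypothetical debugging helper: print every neuron, layer by layer. */
    static void
    neural_dump(const Neural_Network *net) {
        for (ssize_t j = 0; j < net->layers; j++) {
            for (ssize_t i = 0; i < net->layer_size; i++) {
                const Neuron *cur = &net->n[j*net->layer_size + i];
                printf("layer %zd, neuron %zd: value=%f threshold=%f\n",
                       j, i, cur->value, cur->threshold);
            }
        }
    }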

src/neural.c (new file, 71 lines added)
@@ -0,0 +1,71 @@
+#include <stdlib.h> /* malloc, calloc */
+
+#include <cx.h>
+#include <neural.h>
+
+Neural_Network *
+neural_new(size_t layer_size, size_t layers) {
+    Neural_Network *self = malloc(sizeof(Neural_Network));
+    Neuron *n = NULL;
+
+    self->layer_size = layer_size;
+    self->layers = layers;
+    self->n = calloc(layer_size*layers, sizeof(Neuron));
+
+    for (int j = 0; j < layers; j++) {
+        for (int i = 0; i < layer_size; i++) {
+            n = &(self->n[j*layer_size + i]);
+            n->value = 0;
+            n->threshold = 0;
+            if (j) {
+                // Hidden/output neurons read every neuron of the previous layer.
+                n->in_values = calloc(layer_size, sizeof(float *));
+                n->weights = calloc(layer_size, sizeof(float));
+                n->in_values_size = layer_size;
+                for (int k = 0; k < layer_size; k++) {
+                    n->in_values[k] = &(self->n[(j-1)*layer_size + k].value);
+                    n->weights[k] = 0.5;
+                }
+            }
+            else {
+                // Input-layer neurons have no incoming connections.
+                n->in_values = NULL;
+                n->weights = NULL;
+            }
+        }
+    }
+
+    return self;
+}
+
+void
+neural_randomize(Neural_Network *self) {
+    // Does not actually randomize yet: every threshold is just set to 0.5,
+    // which is fine for now.
+    for (int i = 0; i < self->layers; i++) {
+        Neuron *n = &(self->n[i*self->layer_size]);
+        for (int j = 0; j < self->layer_size; j++) {
+            n[j].threshold = 0.5;
+        }
+    }
+}
+
+float *
+neural_process(Neural_Network *self, float *input) {
+    float *retval = NULL;
+
+    // Feed the input vector into the first layer.
+    for (int i = 0; i < self->layer_size; i++) {
+        self->n[i].value = input[i];
+    }
+
+    // Propagate layer by layer: each neuron stores the dot product of the
+    // previous layer's values with its own weights.
+    for (int i = 1; i < self->layers; i++) {
+        for (int j = 0; j < self->layer_size; j++) {
+            Neuron *cur = &(self->n[i*self->layer_size + j]);
+            float dot_prod = 0;
+            for (int k = 0; k < cur->in_values_size; k++) {
+                // MATH GOES BRRRRRRRR
+                dot_prod += *(cur->in_values[k]) * cur->weights[k];
+            }
+            cur->value = dot_prod;
+        }
+    }
+
+    // Hand the last layer's values back to the caller, who owns the buffer.
+    retval = malloc(self->layer_size * sizeof(float));
+    for (int i = 0; i < self->layer_size; i++) {
+        retval[i] = self->n[self->layer_size*(self->layers-1) + i].value;
+    }
+
+    return retval;
+}
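
Putting the new pieces together, a minimal driver for the API might look like the sketch below. The 3-by-4 network size and the input values are arbitrary illustrations; note also that the header declares no neural_free(), so the network itself (and its per-neuron arrays) is simply leaked here.

    #include <stdio.h>
    #include <stdlib.h>
    #include <neural.h>

    int
    main(void) {
        Neural_Network *net = neural_new(3, 4);   /* 3 neurons per layer, 4 layers */
        if (net == NULL)
            return EXIT_FAILURE;

        neural_randomize(net);

        float input[3] = {0.1f, 0.5f, 0.9f};
        float *output = neural_process(net, input);  /* heap buffer, layer_size floats */
        for (int i = 0; i < 3; i++)
            printf("out[%d] = %f\n", i, output[i]);

        free(output);   /* the caller owns the buffer returned by neural_process */
        return EXIT_SUCCESS;
    }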