From 87a03582cd09ee4fd7105577afb94a948acb007b Mon Sep 17 00:00:00 2001
From: Marcel Plch
Date: Sat, 6 Jul 2024 17:59:22 +0200
Subject: [PATCH] Add a basic neural interface.

---
 CMakeLists.txt   |  1 +
 include/cx.h     |  4 ++++
 include/neural.h | 24 ++++++++++++++++
 src/neural.c     | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 118 insertions(+)
 create mode 100644 include/neural.h
 create mode 100644 src/neural.c

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 431d598..3362a59 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -36,6 +36,7 @@ add_executable(
 	src/tensor.c
 	src/model.c
 	src/shader.c
+	src/neural.c
 )
 
 target_link_libraries(
diff --git a/include/cx.h b/include/cx.h
index 2d9898a..96579c0 100644
--- a/include/cx.h
+++ b/include/cx.h
@@ -21,11 +21,15 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
+#include <neural.h>
 
 // Declare functions
 
 int cx_glinit(GLFWwindow **);
 int cx_glrun(GLFWwindow *);
 
+int cx_nninit(Neural_Network **);
+int cx_nnrun(Neural_Network *);
+
 #endif
diff --git a/include/neural.h b/include/neural.h
new file mode 100644
index 0000000..877d2ca
--- /dev/null
+++ b/include/neural.h
@@ -0,0 +1,24 @@
+#ifndef NEURAL_H
+#define NEURAL_H
+
+#include <sys/types.h>
+
+typedef struct _neuron {
+	float value;
+	float threshold;
+	float **in_values;
+	float *weights;
+	ssize_t in_values_size;
+} Neuron;
+
+typedef struct _neural_network {
+	Neuron *n;
+	ssize_t layer_size; // Neurons Per Layer
+	ssize_t layers;
+} Neural_Network;
+
+Neural_Network *neural_new(size_t, size_t);
+void neural_randomize(Neural_Network *);
+float *neural_process(Neural_Network *, float *);
+
+#endif
diff --git a/src/neural.c b/src/neural.c
new file mode 100644
index 0000000..16b741c
--- /dev/null
+++ b/src/neural.c
@@ -0,0 +1,89 @@
+#include <stdlib.h>
+
+#include <neural.h>
+
+/* Allocate a fully-connected feed-forward network of `layers` layers
+ * with `layer_size` neurons each.  Every neuron past the input layer
+ * reads the values of all neurons in the previous layer.  Returns
+ * NULL on allocation failure; the caller owns the returned network. */
+Neural_Network *
+neural_new(size_t layer_size, size_t layers) {
+	Neural_Network *self = malloc(sizeof(*self));
+	Neuron *n = NULL;
+
+	if (!self)
+		return NULL;
+
+	self->layer_size = layer_size;
+	self->layers = layers;
+	self->n = calloc(layer_size * layers, sizeof(Neuron));
+	if (!self->n) {
+		free(self);
+		return NULL;
+	}
+
+	for (size_t j = 0; j < layers; j++) {
+		for (size_t i = 0; i < layer_size; i++) {
+			/* Advance per neuron; the inner loop runs over the
+			 * layer's neurons, not over the layer count. */
+			n = &self->n[j * layer_size + i];
+			n->value = 0;
+			n->threshold = 0;
+			if (j) {
+				n->in_values = calloc(layer_size, sizeof(float *));
+				n->weights = calloc(layer_size, sizeof(float));
+				n->in_values_size = layer_size;
+				for (size_t k = 0; k < layer_size; k++) {
+					n->in_values[k] = &(self->n[(j-1)*layer_size + k].value);
+					n->weights[k] = 0.5;
+				}
+			}
+			else {
+				n->in_values = NULL;
+				n->weights = NULL;
+				n->in_values_size = 0;
+			}
+		}
+	}
+
+	return self;
+}
+
+/* Placeholder: sets every threshold to 0.5 instead of a random value. */
+void
+neural_randomize(Neural_Network *self) {
+	for (ssize_t i = 0; i < self->layers; i++) {
+		Neuron *n = &(self->n[i * self->layer_size]);
+		for (ssize_t j = 0; j < self->layer_size; j++) {
+			n[j].threshold = 0.5;
+		}
+	}
+}
+
+/* Feed `input` (layer_size floats) through the network and return a
+ * malloc'd copy of the output layer, or NULL on allocation failure.
+ * NOTE(review): thresholds are currently not applied to activations. */
+float *
+neural_process(Neural_Network *self, float *input) {
+	float *retval = NULL;
+
+	for (ssize_t i = 0; i < self->layer_size; i++) {
+		self->n[i].value = input[i];
+	}
+	for (ssize_t i = 1; i < self->layers; i++) {
+		for (ssize_t j = 0; j < self->layer_size; j++) {
+			/* Each neuron sums over ALL of its inputs, and the
+			 * result is stored back into the neuron's value. */
+			Neuron *n = &(self->n[i * self->layer_size + j]);
+			float dot_prod = 0;
+			for (ssize_t k = 0; k < n->in_values_size; k++) {
+				dot_prod += *n->in_values[k] * n->weights[k];
+			}
+			n->value = dot_prod;
+		}
+	}
+
+	retval = malloc(self->layer_size * sizeof(float));
+	if (!retval)
+		return NULL;
+	for (ssize_t i = 0; i < self->layer_size; i++) {
+		retval[i] = self->n[self->layer_size*(self->layers-1) + i].value;
+	}
+
+	return retval;
+}