Refactor neural layers

Marcel Plch 2024-10-12 11:17:52 +02:00
parent 6a5a7a3b95
commit 109c94a865
Signed by: dormouse
GPG key ID: 2CA77596BC4BDFFE
3 changed files with 89 additions and 64 deletions

View file

@@ -3,19 +3,22 @@
 typedef struct _neuron {
     float value;
-    float threshold;
-    float **in_values;
-    float *weights;
-    ssize_t in_values_size;
+    float *weights; // Biases of the neuron towards the next layer,
+                    // NULL if output layer
 } Neuron;
 
+typedef struct _neural_layer {
+    Neuron *neurons;
+    size_t layer_size; // Neurons Per Layer
+    size_t layer_size_next; // Neurons in next layer, 0 if output layer,
+} Neural_Layer;
+
 typedef struct _neural_network {
-    Neuron *n;
-    ssize_t layer_size; // Neurons Per Layer
-    ssize_t layers;
+    Neural_Layer **layers;
+    ssize_t layer_count;
 } Neural_Network;
 
-Neural_Network *neural_new(size_t, size_t);
+Neural_Network *neural_new(size_t, size_t, size_t);
 void neural_randomize(Neural_Network *);
 float *neural_process(Neural_Network *, float *);
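
For orientation: the refactor replaces the old flat Neuron array with per-layer objects. A Neural_Network now owns an array of Neural_Layer pointers, each layer owns its Neuron array, and each Neuron keeps one weight per neuron of the next layer. A minimal sketch of walking the new layout, assuming only the declarations above (the helper name neural_dump is hypothetical, not part of the commit):

#include <stdio.h>
#include <neural.h> // the header diffed above

// Hypothetical helper: print the network shape implied by the new structs.
void
neural_dump(const Neural_Network *self) {
    for (ssize_t i = 0; i < self->layer_count; i++) {
        const Neural_Layer *nl = self->layers[i];
        printf("layer %zd: %zu neurons, %zu weights each\n",
               i, nl->layer_size, nl->layer_size_next);
    }
}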

View file

@@ -135,6 +135,7 @@ cx_glrun(GLFWwindow *window) {
     mr = modelRegistry_new();
 
     // Fill the model registry with mesh models
+    for (int j = 0; j < 8; j++) {
     for (int i = 0; i < 64; i++) {
         // Load model to render from file
         //Model *model = model_load("../3d_assets/triangle.obj");
@@ -146,7 +147,7 @@ cx_glrun(GLFWwindow *window) {
         translation_matrix[3] = (((GLfloat)-1*16/9)*.90)
             + ((GLfloat)1/32 * i * (((GLfloat)16/9))*.90);
-        translation_matrix[7] = .90 - ((GLfloat)1/8 * i * .90);
+        translation_matrix[7] = .90 + ((GLfloat)1/8 * j *.90);
 
         model->transformations[0] = translation_matrix;
         model->transformations[1] = aspectRatio_matrix;
@@ -155,6 +156,7 @@ cx_glrun(GLFWwindow *window) {
         modelRegistry_register(mr, model);
     }
+    }
 
     // Remainder from cursor experiments, might be useful later
@@ -181,7 +183,7 @@ cx_glrun(GLFWwindow *window) {
 int
 cx_nninit(Neural_Network **nn) {
     // Allocate a Neural Network
-    *nn = neural_new(64, 1);
+    *nn = neural_new(64, 4, 8);
     if(!*nn) {
         fprintf(stderr, "Failed to initialize Neural Network.\n");
         return -1;
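
The new outer loop turns the previous single row of 64 models into an 8x64 grid: i steps across x through translation_matrix[3] and j steps through y via translation_matrix[7], entries 3 and 7 being the x and y translation slots of a row-major 4x4 matrix. A standalone sketch of just that placement arithmetic, assuming GLfloat from the GL headers that cx_glrun() already uses (grid_position is a hypothetical name, not in the commit):

#include <GLFW/glfw3.h> // for GLfloat, as used by cx_glrun()

// Hypothetical helper mirroring the loop arithmetic above:
// column i in [0,64) places x, row j in [0,8) places y.
static void
grid_position(int i, int j, GLfloat *x, GLfloat *y) {
    *x = (((GLfloat)-1 * 16 / 9) * .90)
        + ((GLfloat)1 / 32 * i * ((GLfloat)16 / 9) * .90);
    *y = .90 + ((GLfloat)1 / 8 * j * .90);
}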

View file

@@ -1,34 +1,47 @@
 #include <cx.h>
+#include <neural.h>
 
-Neural_Network *
-neural_new(size_t layer_size, size_t layers) {
-    Neural_Network *self = malloc(sizeof(Neural_Network));
-    Neuron *n = NULL;
-
-    self->layer_size = layer_size;
-    self->layers = layers;
-    self->n = calloc(layer_size*layers, sizeof(Neuron));
-
-    for (int j = 0; j < layers; j++) {
-        n = &(self->n[j*layer_size]);
-        for (int i = 0; i < layers; i++) {
-            n->value = 0;
-            n->threshold = 0;
-            if (j) {
-                n->in_values = calloc(layer_size, sizeof(float *));
-                n->weights = calloc(layer_size, sizeof(float));
-                n->in_values_size = layer_size;
-                for (int k = 0; k < layer_size; k++) {
-                    n->in_values[k] = &(self->n[(j-1)*layer_size + k].value);
-                    n->weights[k] = 0.5;
-                }
-            }
-            else {
-                n->in_values = NULL;
-                n->weights = NULL;
-            }
-        }
-    }
+static Neural_Layer *
+nl_new(size_t layer_size, size_t layer_size_next) {
+    Neural_Layer *self;
+
+    self = malloc(sizeof(Neural_Layer));
+    self->neurons = calloc(layer_size, sizeof(Neuron));
+    for (int i = 0; i < layer_size; i++) {
+        self->neurons[i].weights = calloc(layer_size_next, sizeof(float));
+    }
+    self->layer_size = layer_size;
+    self->layer_size_next = layer_size_next;
+    return self;
+}
+
+static void
+nl_free(Neural_Layer *self) {
+    free(self->neurons);
+    free(self);
+}
+
+Neural_Network *
+neural_new(size_t input_size, size_t output_size, size_t layer_count) {
+    Neural_Network *self = malloc(sizeof(Neural_Network));
+    if (!self) {
+        // Failed to allocate.
+        return NULL;
+    }
+    // The difference between layer sizes, hidden layers step between the two
+    // sizes in linear fashion.
+    ssize_t layer_diff;
+
+    self->layers = malloc(layer_count * sizeof(Neural_Layer *));
+    layer_diff = (ssize_t) output_size - input_size;
+    // Calculate sizes of individual layers and allocate them.
+    for (int i = 0; i < layer_count; i++) {
+        self->layers[i] = nl_new(input_size
+                + (layer_diff / ((ssize_t)layer_count-(i))),
+                input_size +
+                (layer_diff / ((ssize_t)layer_count-(i+1)))
+                ? i < i-1 : 0);
+    }
     return self;
@@ -36,33 +49,40 @@ neural_new(size_t layer_size, size_t layers) {
 void
 neural_randomize(Neural_Network *self) {
-    // Does not randomize, just sets 0.5, but it doesn't matter for now.
-    for (int i = 0; i < self->layers; i++) {
-        Neuron *n = &(self->n[self->layer_size*i]);
-        for (int j = 0; j < self->layer_size; j++) {
-            n[j].threshold = 0.5;
+    FILE *f;
+    Neural_Layer *nl;
+
+    f = fopen("/dev/urandom", "r");
+
+    for (int i = 0; i < self->layer_count; i++) {
+        nl = self->layers[i];
+        for (int j = 0; j < nl->layer_size; j++) {
+            fread(nl->neurons[j].weights, sizeof(float), nl->layer_size_next, f);
         }
     }
 }
 
 float *
 neural_process(Neural_Network *self, float *input) {
     float *retval = NULL;
+    Neural_Layer *nl = self->layers[0];
 
-    for (int i = 0; i < self->layer_size; i++) {
-        self->n[i].value = input[i];
+    for (int i = 0; i < self->layers[0]->layer_size; i++) {
+        nl->neurons[i].value = input[i];
     }
-    for (int i = 1; i < self->layers; i++) {
+    for (int i = 0; i < self->layer_count; i++) {
+        nl = self->layers[i];
         float dot_prod = 0;
-        for (int j = 0; j < self->layer_size; j++) {
+        for (int j = 0; j < nl->layer_size; j++) {
             // MATH GOES BRRRRRRRR
-            dot_prod += *(self->n[i*self->layer_size + j].in_values)[j] *
-                self->n[i*self->layer_size + j].weights[j];
+            dot_prod += nl->neurons[j].value
+                * nl->neurons[j].weights[j];
         }
     }
-    retval = malloc(self->layer_size * sizeof(float));
-    for (int i = 0; i < self->layer_size; i++) {
-        retval[i] = self->n[self->layer_size*(self->layers-1)].value;
+    retval = malloc(nl->layer_size * sizeof(float));
+    for (int i = 0; i < nl->layer_size; i++) {
+        retval[i] = nl->neurons[i].value;
     }
     return retval;
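
The sizing comment in neural_new() describes hidden layers stepping linearly from input_size to output_size. A minimal standalone sketch of that intent, using plain linear interpolation rather than the divisor chain in the commit (layer_size_at and the main() driver are hypothetical, for illustration only):

#include <stdio.h>
#include <sys/types.h> // ssize_t

// Hypothetical: size of layer i when layer_count layers step
// linearly from input_size to output_size.
static size_t
layer_size_at(size_t input_size, size_t output_size,
              size_t layer_count, size_t i) {
    ssize_t diff = (ssize_t)output_size - (ssize_t)input_size;
    return (size_t)((ssize_t)input_size
            + (ssize_t)i * diff / ((ssize_t)layer_count - 1));
}

int
main(void) {
    // neural_new(64, 4, 8), as called from cx_nninit() above.
    for (size_t i = 0; i < 8; i++)
        printf("layer %zu: %zu neurons\n", i, layer_size_at(64, 4, 8, i));
    return 0;
}

With the commit's arguments (input 64, output 4, 8 layers) this interpolation yields 64, 56, 47, 39, 30, 22, 13, 4 neurons per layer.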