Refactor neural layers

Marcel Plch 2024-10-12 11:17:52 +02:00
parent 6a5a7a3b95
commit 109c94a865
Signed by: dormouse
GPG key ID: 2CA77596BC4BDFFE
3 changed files with 89 additions and 64 deletions
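
The refactor replaces the flat (layer_size, layers) network with per-layer
allocations behind a new (input_size, output_size, layer_count) constructor.
A minimal caller-side sketch against the new API follows; the zeroed input
vector and the printed output are illustrative, and no neural_free()
counterpart exists in this commit:

    #include <stdio.h>
    #include <stdlib.h>
    #include <neural.h>

    int
    main(void) {
        // 64 inputs, 4 outputs, 8 layers, as cx_nninit() requests below.
        Neural_Network *nn = neural_new(64, 4, 8);
        if (!nn)
            return EXIT_FAILURE;
        neural_randomize(nn);

        float input[64] = {0};   // Illustrative all-zero input vector.
        float *output = neural_process(nn, input);
        printf("output[0] = %f\n", output[0]);
        free(output);
        return EXIT_SUCCESS;
    }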

View file

@@ -3,19 +3,22 @@
 typedef struct _neuron {
     float value;
     float threshold;
-    float **in_values;
-    float *weights;
-    ssize_t in_values_size;
+    float *weights; // Weights of the connections toward the next layer,
+                    // NULL if output layer.
 } Neuron;
 
+typedef struct _neural_layer {
+    Neuron *neurons;
+    size_t layer_size;      // Neurons in this layer.
+    size_t layer_size_next; // Neurons in the next layer, 0 if output layer.
+} Neural_Layer;
+
 typedef struct _neural_network {
-    Neuron *n;
-    ssize_t layer_size; // Neurons Per Layer
-    ssize_t layers;
+    Neural_Layer **layers;
+    ssize_t layer_count;
 } Neural_Network;
 
-Neural_Network *neural_new(size_t, size_t);
+Neural_Network *neural_new(size_t, size_t, size_t);
 void neural_randomize(Neural_Network *);
 float *neural_process(Neural_Network *, float *);
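
For orientation: each Neural_Layer now owns its neurons, and each neuron
carries one weight per neuron of the next layer, so the output layer's
weight arrays are empty. A sketch of walking every connection under that
layout; neural_dump() is hypothetical, not part of the commit:

    #include <stdio.h>
    #include <neural.h>

    // layer_size_next is 0 on the output layer, so its neurons
    // contribute no connections and the inner loop body never runs.
    void
    neural_dump(const Neural_Network *nn) {
        for (ssize_t i = 0; i < nn->layer_count; i++) {
            Neural_Layer *nl = nn->layers[i];
            for (size_t j = 0; j < nl->layer_size; j++)
                for (size_t k = 0; k < nl->layer_size_next; k++)
                    printf("layer %zd, %zu -> %zu: %f\n",
                           i, j, k, nl->neurons[j].weights[k]);
        }
    }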

View file

@@ -135,6 +135,7 @@ cx_glrun(GLFWwindow *window) {
     mr = modelRegistry_new();
 
     // Fill the model registry with mesh models
+    for (int j = 0; j < 8; j++) {
     for (int i = 0; i < 64; i++) {
         // Load model to render from file
         //Model *model = model_load("../3d_assets/triangle.obj");
@@ -146,7 +147,7 @@ cx_glrun(GLFWwindow *window) {
         translation_matrix[3] = (((GLfloat)-1*16/9)*.90)
             + ((GLfloat)1/32 * i * (((GLfloat)16/9))*.90);
-        translation_matrix[7] = .90 - ((GLfloat)1/8 * i * .90);
+        translation_matrix[7] = .90 - ((GLfloat)1/8 * j * .90);
 
         model->transformations[0] = translation_matrix;
         model->transformations[1] = aspectRatio_matrix;
@@ -155,6 +156,7 @@ cx_glrun(GLFWwindow *window) {
         modelRegistry_register(mr, model);
     }
+    }
 
     // Remainder from cursor experiments, might be useful later
@@ -181,7 +183,7 @@ cx_glrun(GLFWwindow *window) {
 int
 cx_nninit(Neural_Network **nn) {
     // Allocate a Neural Network
-    *nn = neural_new(64, 1);
+    *nn = neural_new(64, 4, 8);
     if(!*nn) {
         fprintf(stderr, "Failed to initialize Neural Network.\n");
         return -1;
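
The two translation expressions in the loop above lay the models out on a
64x8 grid in normalized device coordinates. Restated as a hypothetical
helper (assuming the GL typedefs are in scope; not part of the commit):

    // Column i of 64 spreads across x with a 16/9 pre-correction for the
    // aspect-ratio matrix; row j of 8 steps down from the top; the .90
    // factor keeps a margin from the screen edges.
    static void
    grid_position(int i, int j, GLfloat *x, GLfloat *y) {
        *x = (16.0f / 9.0f) * 0.90f * (-1.0f + i / 32.0f);
        *y = 0.90f * (1.0f - j / 8.0f);
    }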

View file

@@ -1,34 +1,47 @@
 #include <cx.h>
 #include <neural.h>
 
-Neural_Network *
-neural_new(size_t layer_size, size_t layers) {
-    Neural_Network *self = malloc(sizeof(Neural_Network));
-    Neuron *n = NULL;
+static Neural_Layer *
+nl_new(size_t layer_size, size_t layer_size_next) {
+    Neural_Layer *self;
 
+    self = malloc(sizeof(Neural_Layer));
+    self->neurons = calloc(layer_size, sizeof(Neuron));
+    for (int i = 0; i < layer_size; i++) {
+        self->neurons[i].weights = calloc(layer_size_next, sizeof(float));
+    }
     self->layer_size = layer_size;
-    self->layers = layers;
-    self->n = calloc(layer_size*layers, sizeof(Neuron));
+    self->layer_size_next = layer_size_next;
+    return self;
+}
 
-    for (int j = 0; j < layers; j++) {
-        n = &(self->n[j*layer_size]);
-        for (int i = 0; i < layers; i++) {
-            n->value = 0;
-            n->threshold = 0;
-            if (j) {
-                n->in_values = calloc(layer_size, sizeof(float *));
-                n->weights = calloc(layer_size, sizeof(float));
-                n->in_values_size = layer_size;
-                for (int k = 0; k < layer_size; k++) {
-                    n->in_values[k] = &(self->n[(j-1)*layer_size + k].value);
-                    n->weights[k] = 0.5;
-                }
-            }
-            else {
-                n->in_values = NULL;
-                n->weights = NULL;
-            }
-        }
-    }
+static void
+nl_free(Neural_Layer *self) {
+    // Free each neuron's weight array before the neurons themselves.
+    for (size_t i = 0; i < self->layer_size; i++) {
+        free(self->neurons[i].weights);
+    }
+    free(self->neurons);
+    free(self);
+}
 
+Neural_Network *
+neural_new(size_t input_size, size_t output_size, size_t layer_count) {
+    Neural_Network *self = malloc(sizeof(Neural_Network));
+
+    if (!self) {
+        // Failed to allocate.
+        return NULL;
+    }
+
+    // Difference between input and output sizes; hidden layers step
+    // between the two.
+    ssize_t layer_diff;
+
+    self->layers = malloc(layer_count * sizeof(Neural_Layer *));
+    layer_diff = (ssize_t)output_size - (ssize_t)input_size;
+
+    // Calculate sizes of individual layers and allocate them.
+    for (int i = 0; i < layer_count; i++) {
+        self->layers[i] = nl_new(input_size
+                + (layer_diff / ((ssize_t)layer_count - i)),
+                i < layer_count - 1
+                    ? input_size + (layer_diff / ((ssize_t)layer_count - (i+1)))
+                    : 0);
+    }
+
     return self;
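
Worked through for the call cx_nninit() makes above, neural_new(64, 4, 8):
layer_diff = 4 - 64 = -60, and with C's truncating integer division the
layers get 57, 56, 54, 52, 49, 44, 34, and 4 neurons. Note that the first
layer does not come out at input_size (64), and the steps are hyperbolic
rather than linear; a hypothetical standalone check:

    #include <stdio.h>

    int
    main(void) {
        long input_size = 64, layer_diff = -60, layer_count = 8;

        // Same expression as in neural_new() above.
        for (long i = 0; i < layer_count; i++)
            printf("layer %ld: %ld neurons\n", i,
                   input_size + layer_diff / (layer_count - i));
        return 0;
    }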
@@ -36,33 +49,40 @@ neural_new(size_t layer_size, size_t layers) {
 }
 
 void
 neural_randomize(Neural_Network *self) {
-    // Does not randomize, just sets 0.5, but it doesn't matter for now.
-    for (int i = 0; i < self->layers; i++) {
-        Neuron *n = &(self->n[self->layer_size*i]);
-        for (int j = 0; j < self->layer_size; j++) {
-            n[j].threshold = 0.5;
+    FILE *f;
+    Neural_Layer *nl;
+
+    f = fopen("/dev/urandom", "r");
+    for (int i = 0; i < self->layer_count; i++) {
+        nl = self->layers[i];
+        for (int j = 0; j < nl->layer_size; j++) {
+            fread(nl->neurons[j].weights, sizeof(float),
+                  nl->layer_size_next, f);
         }
     }
+    fclose(f);
 }
 float *
 neural_process(Neural_Network *self, float *input) {
     float *retval = NULL;
+    Neural_Layer *nl = self->layers[0];
 
-    for (int i = 0; i < self->layer_size; i++) {
-        self->n[i].value = input[i];
+    for (int i = 0; i < self->layers[0]->layer_size; i++) {
+        nl->neurons[i].value = input[i];
     }
-    for (int i = 1; i < self->layers; i++) {
+    for (int i = 0; i < self->layer_count; i++) {
+        nl = self->layers[i];
         float dot_prod = 0;
-        for (int j = 0; j < self->layer_size; j++) {
+        for (int j = 0; j < nl->layer_size; j++) {
             // MATH GOES BRRRRRRRR
-            dot_prod += *(self->n[i*self->layer_size + j].in_values)[j] *
-                self->n[i*self->layer_size + j].weights[j];
+            dot_prod += nl->neurons[j].value
+                * nl->neurons[j].weights[j];
         }
     }
 
-    retval = malloc(self->layer_size * sizeof(float));
-    for (int i = 0; i < self->layer_size; i++) {
-        retval[i] = self->n[self->layer_size*(self->layers-1)].value;
+    retval = malloc(nl->layer_size * sizeof(float));
+    for (int i = 0; i < nl->layer_size; i++) {
+        retval[i] = nl->neurons[i].value;
     }
     return retval;
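
As committed, neural_process() accumulates dot_prod per layer but never
writes it back, so every layer after the first keeps the zero values from
calloc() and the returned vector is all zeros. A hedged sketch of the
propagation step this is presumably heading toward, using only the
committed structures (layer_forward() is hypothetical, and the unused
threshold field is left out, as the commit does not use it yet):

    // Feed layer nl forward into layer next: each next-layer neuron k
    // sums value * weights[k] over the neurons feeding into it.
    static void
    layer_forward(const Neural_Layer *nl, Neural_Layer *next) {
        for (size_t k = 0; k < nl->layer_size_next; k++) {
            float dot_prod = 0;
            for (size_t j = 0; j < nl->layer_size; j++)
                dot_prod += nl->neurons[j].value * nl->neurons[j].weights[k];
            next->neurons[k].value = dot_prod;
        }
    }

neural_process() would then call layer_forward(self->layers[i],
self->layers[i + 1]) for i from 0 to layer_count - 2 before copying the
last layer's values into retval.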