CX/src/neural.c

#include <cx.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

static Neural_Layer *
nl_new(size_t layer_size, size_t layer_size_next) {
    Neural_Layer *self;

    self = malloc(sizeof(Neural_Layer));
    if (!self) {
        return NULL;
    }
    self->neurons = calloc(layer_size, sizeof(Neuron));
    for (size_t i = 0; i < layer_size; i++) {
        // One weight per neuron in the next layer.
        self->neurons[i].weights = calloc(layer_size_next, sizeof(float));
    }
    self->layer_size = layer_size;
    self->layer_size_next = layer_size_next;
    return self;
}

static void
nl_free(Neural_Layer *self) {
    // Release each neuron's weight array before the neuron array itself.
    for (size_t i = 0; i < self->layer_size; i++) {
        free(self->neurons[i].weights);
    }
    free(self->neurons);
    free(self);
}

Neural_Network *
neural_new(size_t input_size, size_t output_size, size_t layer_count) {
    Neural_Network *self = malloc(sizeof(Neural_Network));
    if (!self) {
        // Failed to allocate.
        return NULL;
    }
    // The difference between the input and output sizes; hidden layers
    // step between the two in a roughly linear fashion.
    ssize_t layer_diff;

    self->layer_count = layer_count;
    self->layers = malloc(layer_count * sizeof(Neural_Layer *));
    if (!self->layers) {
        free(self);
        return NULL;
    }
    layer_diff = (ssize_t)output_size - (ssize_t)input_size;
    // Calculate sizes of individual layers and allocate them; the final
    // layer has no successor, so its next-layer size is 0.
    for (size_t i = 0; i < layer_count; i++) {
        self->layers[i] = nl_new(input_size
                + (layer_diff / ((ssize_t)layer_count - (ssize_t)i)),
            (i + 1 < layer_count)
                ? input_size + (layer_diff / ((ssize_t)layer_count - (ssize_t)(i + 1)))
                : 0);
    }
    return self;
}

void
neural_randomize(Neural_Network *self) {
    FILE *f;
    Neural_Layer *nl;

    f = fopen("/dev/urandom", "r");
    if (!f) {
        return;
    }
    for (size_t i = 0; i < self->layer_count; i++) {
        nl = self->layers[i];
        for (size_t j = 0; j < nl->layer_size; j++) {
            // Reinterpret random bytes as floats; the resulting weights are
            // arbitrary bit patterns, not uniformly distributed values.
            fread(nl->neurons[j].weights, sizeof(float),
                  nl->layer_size_next, f);
        }
    }
    fclose(f);
}

float *
neural_process(Neural_Network *self, float *input) {
    float *retval = NULL;
    Neural_Layer *nl = self->layers[0];

    // Load the input vector into the first layer.
    for (size_t i = 0; i < nl->layer_size; i++) {
        nl->neurons[i].value = input[i];
    }

    // Feed forward: each neuron in the next layer takes the dot product
    // of the current layer's values with its incoming weights.
    for (size_t i = 0; i + 1 < self->layer_count; i++) {
        Neural_Layer *cur = self->layers[i];
        Neural_Layer *next = self->layers[i + 1];
        for (size_t k = 0; k < next->layer_size; k++) {
            float dot_prod = 0;
            // MATH GOES BRRRRRRRR
            for (size_t j = 0; j < cur->layer_size; j++) {
                dot_prod += cur->neurons[j].value
                    * cur->neurons[j].weights[k];
            }
            next->neurons[k].value = dot_prod;
        }
    }

    // Copy the last layer's values into a buffer owned by the caller.
    nl = self->layers[self->layer_count - 1];
    retval = malloc(nl->layer_size * sizeof(float));
    for (size_t i = 0; i < nl->layer_size; i++) {
        retval[i] = nl->neurons[i].value;
    }
    return retval;
}
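
/*
 * Hypothetical usage sketch, not part of the original file: it assumes cx.h
 * declares Neural_Network, Neuron, and the functions above, and that the
 * caller owns (and frees) the buffer returned by neural_process(). The
 * CX_NEURAL_DEMO guard and all sizes below are illustrative only.
 */
#ifdef CX_NEURAL_DEMO
int
main(void) {
    // 4 inputs, 2 outputs, 3 layers in total (sizes chosen arbitrarily).
    Neural_Network *nn = neural_new(4, 2, 3);
    if (!nn) {
        return 1;
    }
    neural_randomize(nn);

    float input[4] = {0.1f, 0.2f, 0.3f, 0.4f};
    float *output = neural_process(nn, input);
    size_t out_size = nn->layers[nn->layer_count - 1]->layer_size;
    for (size_t i = 0; i < out_size; i++) {
        printf("output[%zu] = %f\n", i, output[i]);
    }
    free(output);
    return 0;
}
#endif /* CX_NEURAL_DEMO */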