Implement training data loading

Marcel Plch 2024-11-20 01:00:18 +01:00
parent 9fe8afb68a
commit ef18b57d61
Signed by: dormouse
GPG key ID: 2CA77596BC4BDFFE
6 changed files with 34 additions and 12 deletions

View file

@@ -29,11 +29,13 @@
// Declare common data structures.
typedef struct _cx_gl_ctx {
    void (*free)(void *self);
    uint8_t master_lock;
    uint8_t *worker_locks;
    CX_ThreadGroup **workers;
    GLFWwindow *window;
    ModelRegistry *mr;
    GLuint *VertexArrayIDs;
    void (*free)(void *self);
    uint8_t master_lock;
    size_t VertexArray_count;
    size_t VertexArray_size;
    GLuint *programIDs;
@@ -42,11 +44,13 @@ typedef struct _cx_gl_ctx {
} CX_GL_CTX;
typedef struct _cx_nn_ctx {
    void (*free)(void *self);
    uint8_t master_lock;
    uint8_t *worker_locks;
    CX_ThreadGroup **workers;
    Neural_Network *nn;
    float *input_buffer;
    float *output_buffer;
    void (*free)(void *self);
    uint8_t master_lock;
} CX_NN_CTX;
typedef struct _cx_ctx {
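
Both context structs now open with the same four members (free, master_lock, worker_locks, workers). If that ordering is intentional, generic worker code could treat any context through that shared prefix. The sketch below is illustrative only; CX_CommonCtx and cx_ctx_release are assumptions, not part of this commit.

#include "cx.h"   /* assumed umbrella header providing CX_ThreadGroup, uint8_t */

/* Hypothetical view of the leading members shared by CX_GL_CTX and CX_NN_CTX. */
typedef struct _cx_common_ctx {
    void (*free)(void *self);   /* per-context destructor */
    uint8_t master_lock;        /* lock flag owned by the coordinating thread */
    uint8_t *worker_locks;      /* one lock flag per worker */
    CX_ThreadGroup **workers;   /* worker thread groups bound to this context */
} CX_CommonCtx;

/* Release any context through its leading destructor pointer.
 * Valid only if every concrete context really keeps this prefix. */
static void
cx_ctx_release(void *ctx) {
    CX_CommonCtx *common = ctx;

    if (common && common->free)
        common->free(ctx);
}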

View file

@@ -28,6 +28,7 @@ typedef struct _neural_data {
Neural_Network *neural_new(size_t, size_t, size_t);
void neural_free(Neural_Network *);
void neural_randomize(Neural_Network *);
float *neural_loadData(Neural_Network *, const char *);
float *neural_process(Neural_Network *, float *);
Neural_Data *neural_getData(Neural_Network *, size_t);
int neural_getMesh(Neural_Network *, ModelRegistry *);

View file

@@ -7,7 +7,7 @@ typedef struct _tensor {
    size_t width;
} Tensor;
Tensor *tensor_new(size_t, size_t);
Tensor *tensor_new(size_t, size_t, int);
Tensor *tensor_fromVertexBuffer(float *, size_t);
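
tensor_new now takes a third argument. Judging from the tensor.c hunks further down, the identity fill that used to be unconditional becomes opt-in: passing 1 writes ones on the diagonal, passing 0 skips it. A small hedged illustration of the two call styles, assuming the non-identity case leaves the elements zeroed and that tensor elements are floats:

#include "cx.h"   /* assumed to declare Tensor and the new tensor_new() */

void
tensor_new_examples(void) {
    /* A 1xN vector for intermediate results; contents assumed zeroed. */
    Tensor *vec = tensor_new(1, 8, 0);

    /* A 4x4 identity, as used for the transforms in neural_getMesh. */
    Tensor *eye = tensor_new(4, 4, 1);

    /* eye->data holds 1.0 on the diagonal; vec->data is presumably all 0.0.
     * The diff shows no matching destructor, so cleanup is omitted here. */
    (void)vec;
    (void)eye;
}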

View file

@@ -278,8 +278,20 @@ static void *
cx_nnthread(void *self) {
    CX_Thread *self_t = self;
    CX_NN_CTX *nn_ctx = self_t->ctx;
    float *input, *output;
    cx_nninit(&nn_ctx->nn);
    input = neural_loadData(nn_ctx->nn, "../training_data/0");
    for (int i = 0; i < 64; i++) {
        nn_ctx->nn->layers[0]->neurons[i].value = input[i];
    }
    output = neural_process(nn_ctx->nn, input);
    for (int i = 0; i < 4; i++) {
        nn_ctx->nn->layers[7]->neurons[i].value = output[i];
    }
    return NULL;
}
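
The new thread body feeds sample "0" through the network with hard-coded sizes: 64 inputs and outputs read back from layers[7]. Below is a hedged sketch of the same flow that takes the sizes from the network and checks the loader's return value. The field names are taken from the surrounding hunks; using layer_count - 1 as the output layer index is an assumption, and this is not the commit's code.

static void *
cx_nnthread_checked(void *self) {
    CX_Thread *self_t = self;
    CX_NN_CTX *nn_ctx = self_t->ctx;
    Neural_Network *nn;
    size_t out_layer;
    float *input, *output;

    cx_nninit(&nn_ctx->nn);
    nn = nn_ctx->nn;

    /* neural_loadData can return NULL (see the error path in the neural.c
     * hunk below), so check before using the buffer. */
    input = neural_loadData(nn, "../training_data/0");
    if (!input)
        return NULL;

    /* neural_process appears to copy the input into the first layer itself
     * (first neural.c hunk below), so the commit's explicit copy loop is
     * omitted here. */
    output = neural_process(nn, input);

    /* Assumption: the last layer is the output layer (the commit uses the
     * literal index 7 and 4 outputs). */
    out_layer = nn->layer_count - 1;
    for (int i = 0; i < nn->layers[out_layer]->layer_size; i++)
        nn->layers[out_layer]->neurons[i].value = output[i];

    /* Ownership of input/output is not visible in the diff, so no free() here. */
    return NULL;
}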

View file

@@ -121,6 +121,7 @@ neural_loadData(Neural_Network *self, const char *filename) {
            return NULL;
            break;
        }
        read_cursor++;
    }
    return retval;
}
@@ -134,10 +135,10 @@ neural_process(Neural_Network *self, float *input) {
    for (int i = 0; i < self->layers[0]->layer_size; i++) {
        nl->neurons[i].value = input[i];
    }
    neural_vector = tensor_new(1, nl->layer_size);
    neural_vector = tensor_new(1, nl->layer_size, 0);
    for (int i = 0; i < self->layer_count; i++) {
        nl = self->layers[i];
        synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size);
        synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size, 0);
        for (int j = 0; j < nl->layer_size; j++) {
            neural_vector->data[j] = nl->neurons[j].value;
            for (int k = 0; k < nl->layer_size_next; k++) {
@@ -246,8 +247,8 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
        brightness = nl->neurons[i].value <= 1.0 ?
            nl->neurons[i].value : 255;
        model_colorXYZ(model, 0, brightness, 0);
        Tensor *translation_matrix = tensor_new(4, 4);
        Tensor *aspectRatio_matrix = tensor_new(4, 4);
        Tensor *translation_matrix = tensor_new(4, 4, 1);
        Tensor *aspectRatio_matrix = tensor_new(4, 4, 1);
        aspectRatio_matrix->data[0] = (GLfloat)9/16;
        translation_matrix->data[3] = (((GLfloat)-1*16/9)*.90)
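
The neural_getMesh call sites keep their identity behaviour by passing 1 and then patch individual elements: data[0] becomes an x scale of 9/16 and data[3] an x translation. A hedged sketch of composing the two transforms with tensor_multip, assuming row-major storage; which operand tensor_multip applies first is not visible in this diff, and the offset value is illustrative:

#include "cx.h"   /* assumed to declare Tensor, tensor_new and tensor_multip */

void
compose_mesh_transform(void) {
    Tensor *translation = tensor_new(4, 4, 1);   /* start from the identity */
    Tensor *aspect = tensor_new(4, 4, 1);

    aspect->data[0] = 9.0f / 16.0f;   /* x scale, matching neural_getMesh */
    translation->data[3] = -0.9f;     /* illustrative x offset */

    /* 4x4 times 4x4 is dimension-safe either way; the operand order that
     * matches the renderer's convention is an assumption. */
    Tensor *combined = tensor_multip(translation, aspect);

    (void)combined;   /* no destructor shown in the diff, so no cleanup here */
}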

View file

@@ -1,7 +1,7 @@
#include "cx.h"
Tensor *
tensor_new(size_t len, size_t width) {
tensor_new(size_t len, size_t width, int is_identity) {
    Tensor *mat;
    mat = malloc(1 * sizeof(Tensor));
@@ -10,6 +10,10 @@ tensor_new(size_t len, size_t width) {
    mat->len = len;
    mat->width = width;
    if (!is_identity) {
        return mat;
    }
    for (int i = 0; i < len; i++) {
        mat->data[i*width+(i % width)] = 1;
    }
@@ -24,7 +28,7 @@ tensor_fromVertexBuffer(float *buffer, size_t bufsize) {
    mat_width = bufsize;
    mat = tensor_new(4, mat_width);
    mat = tensor_new(4, mat_width, 0);
    for (int i = 0; i < bufsize; i++) {
        for (int j = 0; j < 4; j++) {
@@ -44,7 +48,7 @@ tensor_multip(Tensor *mat2, Tensor *mat1) {
    Tensor *result;
    float dot_prod;
    result = tensor_new(mat2->len, mat1->width);
    result = tensor_new(mat2->len, mat1->width, 0);
    for (int i = 0; i < mat1->width; i++) {
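
Putting the tensor.c hunks together, the updated constructor roughly takes the shape below. The early return and the diagonal write come from the diff; the calloc of the data block, the float element type, and the NULL checks are assumptions, since the allocation of data falls outside the hunks shown. This is a hedged reconstruction, not the committed code.

/* Assumes the includes already present in tensor.c ("cx.h", stdlib). */
Tensor *
tensor_new(size_t len, size_t width, int is_identity) {
    Tensor *mat;

    mat = malloc(1 * sizeof(Tensor));
    if (!mat)                                        /* assumption: not shown in the hunk */
        return NULL;

    mat->data = calloc(len * width, sizeof(float));  /* assumption: zeroed storage */
    if (!mat->data) {
        free(mat);
        return NULL;
    }

    mat->len = len;
    mat->width = width;

    if (!is_identity) {
        return mat;                                  /* plain (zero) tensor */
    }

    /* Walk the (possibly wrapped) diagonal and write the identity. */
    for (int i = 0; i < len; i++) {
        mat->data[i*width + (i % width)] = 1;
    }

    return mat;
}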