Refactor tensors (matrices).

Tensors now have dynamic length and width.
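
Rough usage of the new interface (a minimal sketch based on the declarations in the tensor header below; the function name example(), the include name and the 4x100 dimensions are illustrative, not part of this commit):

#include "tensor.h"    /* assumed header name; the include guard is now TENSOR_H */

void example(void) {
    /* Dimensions are chosen at runtime instead of being fixed at 4x4. */
    Tensor *transform = tensor_new(4, 4);      /* 4x4, diagonal initialised to 1 */
    Tensor *vertices  = tensor_new(4, 100);    /* 4 rows, 100 columns */

    /* Result has transform->len rows and vertices->width columns, i.e. 4x100. */
    Tensor *result = tensor_multip(transform, vertices);

    tensor_free(transform);
    tensor_free(vertices);
    tensor_free(result);
}
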
Marcel Plch 2024-10-23 15:27:18 +02:00
parent 1241bca52f
commit 9702382a16
Signed by: dormouse
GPG key ID: 2CA77596BC4BDFFE
5 changed files with 93 additions and 67 deletions

View file

@@ -4,7 +4,7 @@
 typedef struct _model {
     GLfloat *object;
     GLfloat *colors;
-    GLfloat **transformations;
+    Tensor **transformations;
     size_t bufsize;
     size_t transformation_count;
     size_t transformation_size;

View file

@@ -1,11 +1,19 @@
-#ifndef MATRIX_H
-#define MATRIX_H
+#ifndef TENSOR_H
+#define TENSOR_H
-float *matrix_new(void);
+typedef struct _tensor {
+    float *data;
+    size_t len;
+    size_t width;
+} Tensor;
-float *matrix_multip(float *, float *);
+Tensor *tensor_new(size_t, size_t);
-float *matrix_transform(float *, int, float *);
+Tensor *tensor_fromVertexBuffer(float *, size_t);
+Tensor *tensor_multip(Tensor *, Tensor *);
+void tensor_free(Tensor *);
 #endif

View file

@@ -6,7 +6,7 @@ model_new(size_t size) {
     self->object = calloc((size ? size : 1) *4 , sizeof(GLfloat));
     self->colors = calloc((size ? size : 1) *3 , sizeof(GLfloat));
     self->bufsize = size;
-    self->transformations = calloc(8 , sizeof(GLfloat*));
+    self->transformations = calloc(8 , sizeof(Tensor *));
     self->transformation_size = 8;
     self->transformation_count = 0;
     return self;
@@ -79,26 +79,33 @@ model_load(const char *path) {
 GLfloat *
 model_applyTransformations(Model *self) {
     // Temporary storage of transformation results
-    GLfloat *temp_buffer[2] = {NULL};
+    Tensor *temp_buffer[2] = {NULL};
+    GLfloat *retval;
     // BANANA, ROH-TAH-TEH
-    temp_buffer[1] = malloc(self->bufsize * 4 * sizeof(GLfloat));
-    memcpy(temp_buffer[1], self->object, self->bufsize * 4 * sizeof(GLfloat));
+    temp_buffer[1] = tensor_fromVertexBuffer(self->object, self->bufsize);
     // No transformation, create a GLfloat buffer and return the object data.
     if (!self->transformation_count) {
-        return temp_buffer[1];
+        retval = malloc(self->bufsize * 4 * sizeof(GLfloat));
+        memcpy(retval, self->object, self->bufsize * 4 * sizeof(GLfloat));
+        return retval;
     }
     int i = 0;
     do {
-        temp_buffer[i%2] = matrix_transform(temp_buffer[(i+1)%2],
-                                            self->bufsize,
-                                            self->transformations[i]);
-        free(temp_buffer[(i+1)%2]);
+        temp_buffer[i%2] = tensor_multip(self->transformations[i],
+                                         temp_buffer[(i+1)%2]);
+        tensor_free(temp_buffer[(i+1)%2]);
     } while (++i < self->transformation_count);
-    return temp_buffer[(i+1)%2];
+    retval = malloc(self->bufsize * 4 * sizeof(GLfloat));
+    for (int k = 0; k < self->bufsize; k++) {
+        for (int j = 0; j < 4; j++) {
+            retval[k*4+j] = temp_buffer[(i+1)%2]->data[j*temp_buffer[(i+1)%2]->width+k];
+        }
+    }
+    return retval;
 }

View file

@@ -79,9 +79,12 @@ neural_process(Neural_Network *self, float *input) {
         nl = self->layers[i];
         float dot_prod = 0;
         for (int j = 0; j < nl->layer_size; j++) {
+            for (int k = 0; k < nl->layer_size_next; k++) {
+                // MATH GOES BRRRRRRRR
                 dot_prod += nl->neurons[j].value
                             * nl->neurons[j].synapses[j];
+            }
         }
     }
@@ -116,14 +119,14 @@ neural_getMesh(ModelRegistry *mr, Neural_Network *nn) {
             }
             model = model_circle(0, (GLfloat)1/64);
-            GLfloat *translation_matrix = matrix_new();
-            GLfloat *aspectRatio_matrix = matrix_new();
-            aspectRatio_matrix[0] = (GLfloat)9/16;
+            Tensor *translation_matrix = tensor_new(4, 4);
+            Tensor *aspectRatio_matrix = tensor_new(4, 4);
+            aspectRatio_matrix->data[0] = (GLfloat)9/16;
-            translation_matrix[3] = (((GLfloat)-1*16/9)*.90)
+            translation_matrix->data[3] = (((GLfloat)-1*16/9)*.90)
                 + ((GLfloat)1/(nl->layer_size-1)*2 * i * (((GLfloat)16/9))*.90);
-            translation_matrix[7] = .90 - ((GLfloat)1/(nn->layer_count)*2 * j *.90);
+            translation_matrix->data[7] = .90 - ((GLfloat)1/(nn->layer_count)*2 * j *.90);
             model->transformations[0] = translation_matrix;
             model->transformations[1] = aspectRatio_matrix;
@@ -132,7 +135,9 @@ neural_getMesh(ModelRegistry *mr, Neural_Network *nn) {
             modelRegistry_register(mr, model);
         }
     }
+    return 0;
 }

View file

@@ -1,62 +1,68 @@
 #include "cx.h"
-float *
-matrix_new() {
-    float *mat;
+Tensor *
+tensor_new(size_t len, size_t width) {
+    Tensor *mat;
-    mat = calloc(16, sizeof(float));
+    mat = malloc(1 * sizeof(Tensor));
-    for (int i = 0; i < 4; i++) {
-        mat[i*4+i] = 1;
+    mat->data = calloc(width * len, sizeof(float));
+    mat->len = len;
+    mat->width = width;
+    for (int i = 0; i < len; i++) {
+        mat->data[i*width+(i % width)] = 1;
     }
     return mat;
 }
-float *
-matrix_multip(float *mat1, float *mat2) {
-    float *result;
-    float dot_prod;
+Tensor *
+tensor_fromVertexBuffer(float *buffer, size_t bufsize) {
+    int mat_width;
+    Tensor *mat;
-    result = matrix_new();
+    mat_width = bufsize;
-    for (int i = 0; i < 4; i++) {
+    mat = tensor_new(4, mat_width);
-        for (int j = 0; j < 4; j++) {
-            dot_prod = 0;
-            for (int k = 0; k < 4; k++) {
-                dot_prod += mat1[i*4+k] * mat2[j+k*4];
-            }
-            result[j+i*4] = dot_prod;
-        }
-    }
-    return result;
-}
-float *
-matrix_transform(float *vects, int vectcount,
-                 float *mat) {
-    float dot_prod;
-    float *result;
-    result = calloc(vectcount*4, sizeof(float));
-    for (int k = 0; k < vectcount; k++) {
+    for (int i = 0; i < bufsize; i++) {
         for (int j = 0; j < 4; j++) {
-            dot_prod = 0;
-            for (int i = 0; i < 4; i++) {
-                dot_prod += vects[k*4+i] * mat[i+j*4];
-            }
-            result[j+k*4] = dot_prod;
-        }
-        if (result[k*4+3] != 0.0f) {
-            float div = result[k*4+3];
-            for (int i = 0; i < 4; i++) {
-                result[k*4+i] /= div;
-            }
+            mat->data[j*mat_width+i] = buffer[i*4+j];
         }
     }
+    return mat;
+}
+Tensor *
+tensor_multip(Tensor *mat2, Tensor *mat1) {
+    Tensor *result;
+    float dot_prod;
+    result = tensor_new(mat2->len, mat1->width);
+    for (int i = 0; i < mat1->width; i++) {
+        for (int j = 0; j < mat2->len; j++) {
+            dot_prod = 0;
+            for (int k = 0; k < mat1->len; k++) {
+                dot_prod += mat2->data[j*mat2->width+k] * mat1->data[i+(k*mat1->width)];
+            }
+            result->data[i+(j*mat1->width)] = dot_prod;
+        }
+    }
+    result->len = mat2->len;
+    result->width = mat1->width;
     return result;
 }
+void
+tensor_free(Tensor *self) {
+    if (self) {
+        free(self->data);
+    }
+    free(self);
+}
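
For orientation, a sketch of how the new layout relates to the old flat vertex buffer (mirroring model_applyTransformations above; the function name and the two-vertex array are made-up example values): tensor_fromVertexBuffer stores component j of vertex i at data[j*width + i], one row per component and one column per vertex, so a 4x4 transformation applied on the left with tensor_multip acts on every vertex at once, and the nested loop at the end of model_applyTransformations copies the result back into the interleaved GLfloat layout.

void example_roundtrip(void) {
    float verts[2 * 4] = {
        1.0f, 2.0f, 3.0f, 1.0f,    /* vertex 0: x, y, z, w */
        4.0f, 5.0f, 6.0f, 1.0f,    /* vertex 1 */
    };
    float out[2 * 4];

    Tensor *v = tensor_fromVertexBuffer(verts, 2);  /* 4 rows x 2 columns */
    Tensor *t = tensor_new(4, 4);                   /* 4x4, diagonal set to 1 */
    Tensor *r = tensor_multip(t, v);                /* t->len x v->width = 4x2 */

    /* Copy back into the flat per-vertex layout, as model_applyTransformations does. */
    for (int i = 0; i < 2; i++)
        for (int j = 0; j < 4; j++)
            out[i*4 + j] = r->data[j * r->width + i];

    tensor_free(v);
    tensor_free(t);
    tensor_free(r);
}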