Refactor tensors (matrices).
Tensors now have dynamic length and width.
parent 1241bca52f
commit 9702382a16

5 changed files with 93 additions and 67 deletions
@@ -4,7 +4,7 @@
 typedef struct _model {
     GLfloat *object;
     GLfloat *colors;
-    GLfloat **transformations;
+    Tensor **transformations;
     size_t bufsize;
     size_t transformation_count;
     size_t transformation_size;
@@ -1,11 +1,19 @@
-#ifndef MATRIX_H
-#define MATRIX_H
+#ifndef TENSOR_H
+#define TENSOR_H
 
-float *matrix_new(void);
+typedef struct _tensor {
+    float *data;
+    size_t len;
+    size_t width;
+} Tensor;
 
-float *matrix_multip(float *, float *);
+Tensor *tensor_new(size_t, size_t);
 
-float *matrix_transform(float *, int, float *);
+Tensor *tensor_fromVertexBuffer(float *, size_t);
+
+Tensor *tensor_multip(Tensor *, Tensor *);
+
+void tensor_free(Tensor *);
 
 #endif
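As a quick orientation, a minimal sketch of the intended lifecycle of the new type (hypothetical call site; the include name is assumed from the TENSOR_H guard, and the 4-floats-per-vertex buffer layout is taken from src/model.c below):

#include <stddef.h>
#include "tensor.h"          /* assumed include name */

void sketch(float *verts, size_t vert_count) {
    Tensor *m = tensor_new(4, 4);                            /* 4x4, initialised to identity */
    Tensor *v = tensor_fromVertexBuffer(verts, vert_count);  /* 4 x vert_count */
    Tensor *r = tensor_multip(m, v);                         /* r = m * v, also 4 x vert_count */

    /* ... use r->data, r->len, r->width ... */

    tensor_free(r);
    tensor_free(v);
    tensor_free(m);
}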
src/model.c  (29 lines changed)
@@ -6,7 +6,7 @@ model_new(size_t size) {
     self->object = calloc((size ? size : 1) *4 , sizeof(GLfloat));
     self->colors = calloc((size ? size : 1) *3 , sizeof(GLfloat));
     self->bufsize = size;
-    self->transformations = calloc(8 , sizeof(GLfloat*));
+    self->transformations = calloc(8 , sizeof(Tensor *));
     self->transformation_size = 8;
     self->transformation_count = 0;
     return self;
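The transformations array now holds up to transformation_size (8) Tensor pointers. A hypothetical sketch of attaching one, following the assignment pattern used in src/neural.c below (the transformation_count bookkeeping is an assumption; it is not shown in this diff):

#include "cx.h"   /* assumed umbrella header, as included by src/tensor.c below */

Model *make_scaled_model(size_t vertex_count) {
    Model *model = model_new(vertex_count);
    Tensor *scale = tensor_new(4, 4);    /* starts as the identity */
    scale->data[0] = 0.5f;               /* halve x, like aspectRatio_matrix in src/neural.c */

    model->transformations[0] = scale;
    model->transformation_count = 1;     /* assumed bookkeeping; not shown in this diff */
    return model;
}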
@@ -79,26 +79,33 @@ model_load(const char *path) {
 GLfloat *
 model_applyTransformations(Model *self) {
     // Temporary storage of transformation results
-    GLfloat *temp_buffer[2] = {NULL};
+    Tensor *temp_buffer[2] = {NULL};
+    GLfloat *retval;
 
     // BANANA, ROH-TAH-TEH
-    temp_buffer[1] = malloc(self->bufsize * 4 * sizeof(GLfloat));
-    memcpy(temp_buffer[1], self->object, self->bufsize * 4 * sizeof(GLfloat));
+    temp_buffer[1] = tensor_fromVertexBuffer(self->object, self->bufsize);
 
+    // No transformation, create a GLfloat buffer and return the object data.
     if (!self->transformation_count) {
-        return temp_buffer[1];
+        retval = malloc(self->bufsize * 4 * sizeof(GLfloat));
+        memcpy(retval, self->object, self->bufsize * 4 * sizeof(GLfloat));
+        return retval;
     }
 
     int i = 0;
     do {
-        temp_buffer[i%2] = matrix_transform(temp_buffer[(i+1)%2],
-                                            self->bufsize,
-                                            self->transformations[i]);
-        free(temp_buffer[(i+1)%2]);
+        temp_buffer[i%2] = tensor_multip(self->transformations[i],
+                                         temp_buffer[(i+1)%2]);
+        tensor_free(temp_buffer[(i+1)%2]);
    } while (++i < self->transformation_count);
 
-    return temp_buffer[(i+1)%2];
+    retval = malloc(self->bufsize * 4 * sizeof(GLfloat));
+    for (int k = 0; k < self->bufsize; k++) {
+        for (int j = 0; j < 4; j++) {
+            retval[k*4+j] = temp_buffer[(i+1)%2]->data[j*temp_buffer[(i+1)%2]->width+k];
+        }
+    }
+
+    return retval;
 }
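The copy-out loop at the end of model_applyTransformations shows the layout convention: a vertex tensor keeps components as rows and vertices as columns, while the GL-side buffer interleaves the 4 components of each vertex. A standalone sketch of that conversion, under those assumptions (hypothetical helper name, not part of this diff):

#include <stdlib.h>
#include "tensor.h"   /* assumed include name */

/* Flatten a 4 x n vertex tensor back into an interleaved x,y,z,w buffer. */
float *tensor_toVertexBuffer(const Tensor *t) {
    float *buf = malloc(t->width * 4 * sizeof(float));
    for (size_t k = 0; k < t->width; k++)        /* vertex index (column) */
        for (size_t j = 0; j < 4; j++)           /* component index (row) */
            buf[k*4 + j] = t->data[j * t->width + k];
    return buf;
}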
src/neural.c  (15 lines changed)
@@ -79,9 +79,12 @@ neural_process(Neural_Network *self, float *input) {
         nl = self->layers[i];
         float dot_prod = 0;
         for (int j = 0; j < nl->layer_size; j++) {
+            for (int k = 0; k < nl->layer_size_next; k++) {
+
             // MATH GOES BRRRRRRRR
             dot_prod += nl->neurons[j].value
                       * nl->neurons[j].synapses[j];
+            }
         }
     }
 
@@ -116,14 +119,14 @@ neural_getMesh(ModelRegistry *mr, Neural_Network *nn) {
             }
 
             model = model_circle(0, (GLfloat)1/64);
-            GLfloat *translation_matrix = matrix_new();
-            GLfloat *aspectRatio_matrix = matrix_new();
-            aspectRatio_matrix[0] = (GLfloat)9/16;
+            Tensor *translation_matrix = tensor_new(4, 4);
+            Tensor *aspectRatio_matrix = tensor_new(4, 4);
+            aspectRatio_matrix->data[0] = (GLfloat)9/16;
 
-            translation_matrix[3] = (((GLfloat)-1*16/9)*.90)
+            translation_matrix->data[3] = (((GLfloat)-1*16/9)*.90)
                 + ((GLfloat)1/(nl->layer_size-1)*2 * i * (((GLfloat)16/9))*.90);
 
-            translation_matrix[7] = .90 - ((GLfloat)1/(nn->layer_count)*2 * j *.90);
+            translation_matrix->data[7] = .90 - ((GLfloat)1/(nn->layer_count)*2 * j *.90);
 
             model->transformations[0] = translation_matrix;
             model->transformations[1] = aspectRatio_matrix;
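The two indices written above are the translation entries of a row-major 4x4 matrix that is applied from the left to column vertices (a convention inferred from tensor_multip and the copy-out loop in src/model.c). A sketch of the same construction as a hypothetical helper:

#include "tensor.h"   /* assumed include name */

/* Build a 2-D translation tensor the way neural_getMesh does. */
Tensor *tensor_translation2D(float tx, float ty) {
    Tensor *t = tensor_new(4, 4);   /* identity */
    t->data[3] = tx;                /* row 0, column 3: x offset */
    t->data[7] = ty;                /* row 1, column 3: y offset */
    return t;
}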
@@ -132,7 +135,9 @@ neural_getMesh(ModelRegistry *mr, Neural_Network *nn) {
 
             modelRegistry_register(mr, model);
 
+
+
         }
     }
     return 0;
 }
src/tensor.c  (94 lines changed)
@@ -1,62 +1,68 @@
 #include "cx.h"
 
-float *
-matrix_new() {
-    float *mat;
+Tensor *
+tensor_new(size_t len, size_t width) {
+    Tensor *mat;
 
-    mat = calloc(16, sizeof(float));
+    mat = malloc(1 * sizeof(Tensor));
 
-    for (int i = 0; i < 4; i++) {
-        mat[i*4+i] = 1;
+    mat->data = calloc(width * len, sizeof(float));
+    mat->len = len;
+    mat->width = width;
+
+    for (int i = 0; i < len; i++) {
+        mat->data[i*width+(i % width)] = 1;
     }
 
     return mat;
 }
 
-float *
-matrix_multip(float *mat1, float *mat2) {
-    float *result;
-    float dot_prod;
+Tensor *
+tensor_fromVertexBuffer(float *buffer, size_t bufsize) {
+    int mat_width;
+    Tensor *mat;
 
-    result = matrix_new();
+    mat_width = bufsize;
 
-    for (int i = 0; i < 4; i++) {
-        for (int j = 0; j < 4; j++) {
-            dot_prod = 0;
-            for (int k = 0; k < 4; k++) {
-                dot_prod += mat1[i*4+k] * mat2[j+k*4];
-            }
-            result[j+i*4] = dot_prod;
-        }
-    }
-
-    return result;
-}
-
-float *
-matrix_transform(float *vects, int vectcount,
-                 float *mat) {
-    float dot_prod;
-    float *result;
-
-    result = calloc(vectcount*4, sizeof(float));
+    mat = tensor_new(4, mat_width);
 
-    for (int k = 0; k < vectcount; k++) {
+    for (int i = 0; i < bufsize; i++) {
         for (int j = 0; j < 4; j++) {
-            dot_prod = 0;
-            for (int i = 0; i < 4; i++) {
-                dot_prod += vects[k*4+i] * mat[i+j*4];
-            }
-            result[j+k*4] = dot_prod;
-        }
-        if (result[k*4+3] != 0.0f) {
-            float div = result[k*4+3];
-            for (int i = 0; i < 4; i++) {
-                result[k*4+i] /= div;
-            }
+            mat->data[j*mat_width+i] = buffer[i*4+j];
         }
     }
+
+    return mat;
+}
+
+Tensor *
+tensor_multip(Tensor *mat2, Tensor *mat1) {
+    Tensor *result;
+    float dot_prod;
+
+    result = tensor_new(mat2->len, mat1->width);
+
+    for (int i = 0; i < mat1->width; i++) {
+
+        for (int j = 0; j < mat2->len; j++) {
+            dot_prod = 0;
+            for (int k = 0; k < mat1->len; k++) {
+                dot_prod += mat2->data[j*mat2->width+k] * mat1->data[i+(k*mat1->width)];
+            }
+            result->data[i+(j*mat1->width)] = dot_prod;
+        }
+    }
+
+    result->len = mat2->len;
+    result->width = mat1->width;
 
     return result;
 }
+
+void
+tensor_free(Tensor *self) {
+    if (self) {
+        free(self->data);
+    }
+    free(self);
+}
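A note on tensor_multip's argument order: the first argument is the left factor, so tensor_multip(A, B) computes A x B, with result->len = A->len and result->width = B->width (mathematically A->width should equal B->len). A small self-contained check under those assumptions:

#include <stdio.h>
#include "tensor.h"   /* assumed include name */

int main(void) {
    Tensor *a = tensor_new(4, 4);            /* 4x4 identity */
    Tensor *b = tensor_new(4, 2);            /* 4x2 operand */
    Tensor *c = tensor_multip(a, b);         /* c = a * b, a 4x2 tensor */

    printf("%zu x %zu\n", c->len, c->width); /* prints "4 x 2" */

    tensor_free(a);
    tensor_free(b);
    tensor_free(c);
    return 0;
}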