Compare commits


10 commits

SHA1 Message Date
264fcb407b
Implement XML export
This allows saving the neural network once it has been trained.
2024-12-04 16:01:50 +01:00
ef18b57d61
Implement training data loading 2024-11-20 01:00:18 +01:00
9fe8afb68a
Now I'm kinda happy with the threads. 2024-11-14 12:19:53 +01:00
e47a6d26cd
Document current issues. 2024-11-11 19:02:00 +01:00
4169714b24
Readability tweaks 2024-11-11 18:43:38 +01:00
c2b9dfdd29
Memory management tweaks
There are no memory leaks,
yet, I discover, with my steady course,
thru my small valgrind peeks,
that nvidia are a bunch of stupid a-holes.
2024-11-11 17:42:57 +01:00
68d3d4b692
Stop eating my RAM 2024-11-10 23:20:25 +01:00
0ea88cb6bc
Implement threading 2024-11-10 21:28:17 +01:00
3da9e7df5a
Minor tweaks
Error handling in cx_init().
More warnings with -Wpedantic.
2024-11-10 17:40:30 +01:00
afc9aec314
Clean up trailing whitespace 2024-11-10 12:59:35 +01:00
14 changed files with 528 additions and 155 deletions

CMakeLists.txt

@@ -1,5 +1,5 @@
# CMake entry point
cmake_minimum_required (VERSION 3.30.5)
cmake_minimum_required(VERSION 3.31.0)
project(CX C)
cmake_policy(SET CMP0072 NEW)
@@ -20,7 +20,7 @@ set(ALL_LIBS
pthread
)
set(CMAKE_C_FLAGS "-O0 -ggdb -Wall")
set(CMAKE_C_FLAGS "-O0 -ggdb -Wall -std=gnu99 -Wpedantic")
add_definitions(
-DTW_STATIC
@@ -34,6 +34,7 @@ add_executable(
cx
src/main.c
src/cx.c
src/cx_thread.c
src/tensor.c
src/model.c
src/shader.c

doc/issues.md (new file, 19 lines)

@@ -0,0 +1,19 @@
# Issues
## Error handling
Some errors are handled, some aren't, and some are handled only
partially; some errors (and/or their handling) may break the program
before a proper return. Some return values of library functions are
ignored altogether.
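A minimal sketch of the goto-cleanup convention already used elsewhere in the tree (e.g. cx_thread_new()), which these open items would converge on; names are hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical sketch: every failure funnels through one cleanup
 * label, and no library return value is silently dropped. */
static int
cx_example_init(char **out) {
    char *buf;

    buf = malloc(64);
    if (!buf) {
        goto err;
    }
    if (snprintf(buf, 64, "initialized") < 0) {
        goto err;   /* checked, not ignored */
    }
    *out = buf;
    return 0;
err:
    free(buf);      /* free(NULL) is safe */
    return -1;
}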
## Context handling
Context handling in its current form relies on every
context type storing a free() function at a specific
offset in its data structure.
This will most likely segfault as soon as a new
structure is used whose layout does not match.
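One assumed fix (not part of this diff): give every context type a shared header as its first member, so the free() pointer sits at a guaranteed offset:

/* Hypothetical sketch: C guarantees that a pointer to a struct may be
 * converted to a pointer to its first member, so this dispatch is
 * well-defined for any context that leads with the header. */
typedef struct _cx_ctx_header {
    void (*free)(void *self);
} CX_CTX_Header;

typedef struct _cx_example_ctx {
    CX_CTX_Header header;   /* must be the first member */
    /* type-specific fields follow */
} CX_Example_CTX;

static void
cx_ctx_free_any(void *ctx) {
    if (ctx) {
        ((CX_CTX_Header *)ctx)->free(ctx);
    }
}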

include/cx.h

@@ -1,6 +1,8 @@
#ifndef CX_H
#define CX_H
#define __STDC_WANT_IEC_60559_BFP_EXT__
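// Exposes strfromf() from ISO/IEC TS 18661-1, used by the XML export;
// must be defined before the standard headers are included.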
// Include standard headers
#include <stdio.h>
#include <stdlib.h>
@@ -10,6 +12,8 @@
#include <unistd.h>
#include <stdint.h>
#include <pthread.h>
#include <inttypes.h>
#include <string.h>
// Include GLEW
#include <GL/glew.h>
@@ -19,43 +23,51 @@
#include <GLFW/glfw3.h>
// Include project headers
#include <cx_thread.h>
#include <tensor.h>
#include <model.h>
#include <tensor.h>
#include <shader.h>
#include <neural.h>
#include <shader.h>
// Declare common data structures.
typedef struct _cx_thrd {
pthread_t thread;
void *ctx; // Arbitrary thread context
} CX_Thread;
typedef struct _cx_thrgr {
CX_Thread *group_manager;
CX_Thread **workers;
size_t worker_count;
size_t worker_size;
} CX_ThreadGroup;
typedef struct _cx_ctx {
typedef struct _cx_gl_ctx {
void (*free)(void *self);
uint8_t master_lock;
uint8_t *worker_locks;
CX_ThreadGroup **workers;
GLFWwindow *window;
Neural_Network *nn;
CX_ThreadGroup **threads;
ModelRegistry *mr;
GLuint *VertexArrayIDs;
size_t VertexArray_count;
size_t VertexArray_size;
GLuint *programIDs;
size_t ProgramID_count;
size_t ProgramID_size;
} CX_GL_CTX;
typedef struct _cx_nn_ctx {
void (*free)(void *self);
uint8_t master_lock;
uint8_t *worker_locks;
CX_ThreadGroup **workers;
Neural_Network *nn;
float *input_buffer;
float *output_buffer;
} CX_NN_CTX;
typedef struct _cx_ctx {
CX_ThreadGroup **threads;
CX_GL_CTX *gl_ctx;
CX_NN_CTX *nn_ctx;
} CX_Context;
// Declare functions
CX_Context *cx_context_new(void);
int cx_glinit(GLFWwindow **);
int cx_glinit(CX_GL_CTX **);
int cx_nninit(Neural_Network **);
int cx_init(CX_Context **);

include/cx_thread.h (new file, 20 lines)

@@ -0,0 +1,20 @@
#ifndef CX_THREAD_H
#define CX_THREAD_H
typedef struct _cx_thrd {
pthread_t thread;
void *ctx; // Arbitrary thread context
} CX_Thread;
typedef struct _cx_thrgr {
CX_Thread *group_manager;
CX_Thread **workers;
size_t worker_count;
size_t worker_size;
} CX_ThreadGroup;
CX_ThreadGroup *cx_threadGroup_new(void *(*)(void *), void *);
void cx_threadGroup_free(CX_ThreadGroup *);
#endif

include/model.h

@@ -22,7 +22,7 @@ int modelRegistry_register(ModelRegistry *, Model *);
void modelRegistry_free(ModelRegistry *);
GLfloat * model_applyTransformations(Model *);
void model_colorFromPosition(Model *);
void model_colorXYZ(Model *, int R, int G, int B);
void model_colorXYZ(Model *, float R, float G, float B);
void model_colorRed(Model *);
void model_colorGreen(Model *);
void model_colorBlue(Model *);

include/neural.h

@@ -26,10 +26,13 @@ typedef struct _neural_data {
} Neural_Data;
Neural_Network *neural_new(size_t, size_t, size_t);
void neural_free(Neural_Network *);
void neural_randomize(Neural_Network *);
float *neural_loadData(Neural_Network *, const char *);
float *neural_process(Neural_Network *, float *);
Neural_Data *neural_getData(Neural_Network *, size_t);
int neural_getMesh(Neural_Network *, ModelRegistry *);
char *neural_getXML(Neural_Network *);
#endif

include/tensor.h

@@ -7,7 +7,7 @@ typedef struct _tensor {
size_t width;
} Tensor;
Tensor *tensor_new(size_t, size_t);
Tensor *tensor_new(size_t, size_t, int);
Tensor *tensor_fromVertexBuffer(float *, size_t);

src/cx.c (264 changed lines)

@@ -1,41 +1,5 @@
#include <cx.h>
static CX_Thread *
cx_thread_new(void *(*target)(void *),
void *ctx) {
CX_Thread *self;
int err;
self = malloc(sizeof(CX_Thread));
if (!self) {
goto err;
}
err = pthread_create(&self->thread, NULL, target, ctx);
if (err) {
goto err;
}
self->ctx = ctx;
err:
free(self);
return NULL;
}
static CX_ThreadGroup *
cx_threadGroup_new(void *(*target)(void *),
void *ctx) {
CX_ThreadGroup *self;
self = malloc(sizeof(CX_ThreadGroup));
self->group_manager = cx_thread_new(target, ctx);
self->workers = malloc(8 * sizeof(CX_Thread *));
self->worker_count = 0;
self->worker_size = 8;
return self;
}
static void
cx_glBindBuffer(GLfloat *render_buffer, GLuint buffer_address,
GLuint gl_index, GLint member_size, GLsizeiptr bufsize) {
@@ -116,13 +80,58 @@ cx_loadShaders(GLuint *VertexArrayID, GLuint *programID) {
return 0;
}
void
gl_ctx_free(void *self) {
CX_GL_CTX *gl_ctx;
gl_ctx = self;
if (gl_ctx) {
free(gl_ctx->VertexArrayIDs);
free(gl_ctx->programIDs);
modelRegistry_free(gl_ctx->mr);
}
free(gl_ctx);
}
void
nn_ctx_free(void *self) {
CX_NN_CTX *nn_ctx;
nn_ctx = self;
if (nn_ctx) {
free(nn_ctx->input_buffer);
free(nn_ctx->output_buffer);
neural_free(nn_ctx->nn);
}
free(nn_ctx);
}
int
cx_glinit(GLFWwindow **window) {
cx_glinit(CX_GL_CTX **gl_ctx) {
// Initialize OpenGL context
(*gl_ctx)->VertexArrayIDs = calloc(1, sizeof(GLuint));
if (!(*gl_ctx)->VertexArrayIDs) {
goto err;
}
(*gl_ctx)->VertexArray_count = 0;
(*gl_ctx)->VertexArray_size = 1;
(*gl_ctx)->programIDs = calloc(1, sizeof(GLuint));
if (!(*gl_ctx)->programIDs) {
goto err;
}
(*gl_ctx)->ProgramID_count = 0;
(*gl_ctx)->ProgramID_size = 1;
(*gl_ctx)->free = &gl_ctx_free;
// Initialise GLFW
printf("Initializing OpenGL.\n");
if(!glfwInit()) {
fprintf(stderr, "Failed to initialize GLFW\n");
return -1;
goto err;
}
glfwWindowHint(GLFW_SAMPLES, 4);
@@ -134,29 +143,33 @@ cx_glinit(GLFWwindow **window) {
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
// Open a window and create its OpenGL context
*window = glfwCreateWindow(1280, 720, "C-X", NULL, NULL);
if (*window == NULL) {
(*gl_ctx)->window = glfwCreateWindow(1280, 720, "C-X", NULL, NULL);
if ((*gl_ctx)->window == NULL) {
fprintf(stderr, "Failed to open GLFW window.\n");
glfwTerminate();
return -1;
goto err;
}
printf("Window created.\n");
glfwMakeContextCurrent(*window);
glfwMakeContextCurrent((*gl_ctx)->window);
// Initialize GLEW
if (glewInit() != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
glfwTerminate();
return -1;
goto err;
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(*window, GLFW_STICKY_KEYS, GL_TRUE);
glfwSetInputMode((*gl_ctx)->window, GLFW_STICKY_KEYS, GL_TRUE);
// Dark grey background
glClearColor(0.15f, 0.15f, 0.15f, 0.0f);
return 0;
err:
return -1;
}
int
@@ -175,79 +188,138 @@ cx_nninit(Neural_Network **nn) {
return 0;
}
static void
master_thread(void *ctx) {
}
int
cx_init(CX_Context **cx_ctx) {
printf("Initializing CX.");
CX_GL_CTX *gl_ctx;
CX_NN_CTX *nn_ctx;
printf("Initializing CX.\n");
nn_ctx = calloc(1, sizeof(CX_NN_CTX));
nn_ctx->free = &nn_ctx_free;
*cx_ctx = calloc(1, sizeof(CX_Context));
(*cx_ctx)->VertexArrayIDs = calloc(1, sizeof(GLuint));
(*cx_ctx)->VertexArray_count = 0;
(*cx_ctx)->VertexArray_size = 1;
(*cx_ctx)->programIDs = calloc(1, sizeof(GLuint));
(*cx_ctx)->ProgramID_count = 0;
(*cx_ctx)->ProgramID_size = 1;
(*cx_ctx)->threads = calloc(1, sizeof(CX_ThreadGroup));
gl_ctx = calloc(1, sizeof(CX_GL_CTX));
if (cx_glinit(&(*cx_ctx)->window)) {
return -1;
}
(*cx_ctx)->gl_ctx = gl_ctx;
(*cx_ctx)->nn_ctx = nn_ctx;
if (cx_nninit(&(*cx_ctx)->nn)) {
return -1;
(*cx_ctx)->threads = calloc(1, sizeof(CX_ThreadGroup *));
if (!(*cx_ctx)->threads) {
goto err;
}
return 0;
err:
if ((*cx_ctx)->gl_ctx) {
free((*cx_ctx)->gl_ctx->VertexArrayIDs);
free((*cx_ctx)->gl_ctx->programIDs);
free((*cx_ctx)->threads);
}
free(*cx_ctx);
return -1;
}
static int
cx_glrun() {
return 0;
}
static int
cx_nnrun(Neural_Network *nn) {
// Establish a neural interface.
float *input_buffer = malloc(64*sizeof(float));
float *output_buffer;
output_buffer = neural_process(nn, input_buffer);
return 0;
}
int
cx_run(CX_Context *cx_ctx) {
ModelRegistry *mr;
if (cx_loadShaders(cx_ctx->VertexArrayIDs, cx_ctx->programIDs)) {
return -1;
}
// Establish a model registry
mr = modelRegistry_new();
// Fill the model registry with mesh models
neural_getMesh(cx_ctx->nn, mr);
cx_glrun(CX_GL_CTX *ctx) {
// Left over from cursor experiments; might be useful later
double xpos, ypos;
glfwGetCursorPos(cx_ctx->window, &xpos, &ypos);
glfwGetCursorPos(ctx->window, &xpos, &ypos);
do {
cx_glrender(cx_ctx->window, cx_ctx->programIDs[0], mr);
// Skip render step if context is locked.
if (!ctx->master_lock) {
cx_glrender(ctx->window, ctx->programIDs[0], ctx->mr);
}
usleep(1000000/60);
// Check if the ESC key was pressed or the window was closed
} while(glfwGetKey(cx_ctx->window, GLFW_KEY_ESCAPE) != GLFW_PRESS
&& !glfwWindowShouldClose(cx_ctx->window));
} while(glfwGetKey(ctx->window, GLFW_KEY_ESCAPE) != GLFW_PRESS
&& !glfwWindowShouldClose(ctx->window));
// Close OpenGL window and terminate GLFW
glfwTerminate();
modelRegistry_free(mr);
return 0;
}
static int
cx_nnrun(CX_Thread *self) {
// Establish a neural interface.
float *output_buffer;
CX_NN_CTX *ctx = self->ctx;
output_buffer = neural_process(ctx->nn, ctx->input_buffer);
ctx->output_buffer = output_buffer;
return 0;
}
static void *
cx_glthread(void *self) {
CX_Thread *self_t = self;
CX_GL_CTX *gl_ctx = self_t->ctx;
cx_glinit(&gl_ctx);
if (cx_loadShaders(gl_ctx->VertexArrayIDs, gl_ctx->programIDs)) {
return NULL;
}
cx_glrun(gl_ctx);
return NULL;
}
static void *
cx_nnthread(void *self) {
CX_Thread *self_t = self;
CX_NN_CTX *nn_ctx = self_t->ctx;
float *input, *output;
char *export;
cx_nninit(&nn_ctx->nn);
input = neural_loadData(nn_ctx->nn, "../training_data/0");
output = neural_process(nn_ctx->nn, input);
export = neural_getXML(nn_ctx->nn);
return export;
}
int
cx_run(CX_Context *ctx) {
CX_ThreadGroup *tg[2];
void *neural_xml;
// Establish a model registry
ctx->gl_ctx->mr = modelRegistry_new();
ctx->gl_ctx->master_lock = 1;
tg[0] = cx_threadGroup_new(&cx_glthread, ctx->gl_ctx);
tg[1] = cx_threadGroup_new(&cx_nnthread, ctx->nn_ctx);
pthread_join(tg[1]->group_manager->thread, &neural_xml);
ctx->gl_ctx->master_lock = 0;
neural_getMesh(ctx->nn_ctx->nn, ctx->gl_ctx->mr);
pthread_join(tg[0]->group_manager->thread, NULL);
cx_threadGroup_free(tg[0]);
cx_threadGroup_free(tg[1]);
free(ctx->threads);
free(ctx);
free(neural_xml);
return 0;
}
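The master_lock handshake above shares a plain uint8_t between the GL and main threads, which is formally a data race. A sketch of the same gate with C11 atomics, assuming nothing else about this tree; names are hypothetical:

#include <stdatomic.h>

/* Hypothetical race-free variant of ctx->master_lock. */
static atomic_uchar scene_lock = 1;

/* Main thread, right after pthread_join() on the NN thread. */
static void
scene_unlock(void) {
    atomic_store_explicit(&scene_lock, 0, memory_order_release);
}

/* GL thread, checked once per frame in place of ctx->master_lock. */
static int
scene_locked(void) {
    return atomic_load_explicit(&scene_lock, memory_order_acquire);
}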

src/cx_thread.c (new file, 64 lines)

@@ -0,0 +1,64 @@
#include <cx.h>
CX_Thread *
cx_thread_new(void *(*target)(void *),
void *ctx) {
CX_Thread *self;
int err;
self = malloc(sizeof(CX_Thread));
if (!self) {
goto err;
}
self->ctx = ctx;
err = pthread_create(&self->thread, NULL, target, self);
if (err) {
goto err;
}
return self;
err:
free(self);
return NULL;
}
void
cx_thread_free(CX_Thread *self) {
if (self) {
/* TODO */
/* This is naive in its current form and will shatter
* sooner or later.
* Fix the context structures so that this call
* is guaranteed not to touch invalid memory.
*/
((CX_GL_CTX *)self->ctx)->free(self->ctx);
}
free(self);
}
CX_ThreadGroup *
cx_threadGroup_new(void *(*target)(void *),
void *ctx) {
CX_ThreadGroup *self;
self = malloc(sizeof(CX_ThreadGroup));
self->workers = malloc(8 * sizeof(CX_Thread *));
self->worker_count = 0;
self->worker_size = 8;
self->group_manager = cx_thread_new(target, ctx);
return self;
}
void
cx_threadGroup_free(CX_ThreadGroup *self) {
if (self) {
cx_thread_free(self->group_manager);
free(self->workers);
}
free(self);
}

src/main.c

@@ -4,10 +4,8 @@ int
main(void) {
// CX context (Window, neural network, threads.)
CX_Context *cx_ctx;
int retval;
if (cx_init(&cx_ctx)) {
return -1;
}

src/model.c

@@ -92,6 +92,7 @@ model_applyTransformations(Model *self) {
if (!self->transformation_count) {
retval = malloc(self->bufsize * 4 * sizeof(GLfloat));
memcpy(retval, self->object, self->bufsize * 4 * sizeof(GLfloat));
tensor_free(temp_buffer[1]);
return retval;
}
@@ -109,6 +110,7 @@ model_applyTransformations(Model *self) {
->data[j*temp_buffer[(i+1)%2]->width+k];
}
}
tensor_free(temp_buffer[(i+1)%2]);
return retval;
}
@@ -127,7 +129,7 @@ model_colorFromPosition(Model *self) {
}
}
void model_colorXYZ(Model *self, int R, int G, int B) {
void model_colorXYZ(Model *self, float R, float G, float B) {
for (int i = 0; i < self->bufsize; i++) {
for (int j = 0; j < 4; j++) {
switch(j) {

src/neural.c

@@ -17,7 +17,12 @@ nl_new(size_t layer_size, size_t layer_size_next) {
static void
nl_free(Neural_Layer *self) {
if (self) {
for (int i = 0; i < self->layer_size; i++) {
free(self->neurons[i].synapses);
}
free(self->neurons);
}
free(self);
}
@@ -39,8 +44,8 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {
// Calculate sizes of individual layers and allocate them.
for (int i = 0; i < layer_count; i++) {
self->layers[i] = nl_new(input_size
+ (layer_diff * i / ((ssize_t)layer_count-1)),
self->layers[i] = nl_new(input_size + (layer_diff * i
/ ((ssize_t)layer_count-1)),
i < (layer_count-1) ?
(input_size + (layer_diff * (i+1)
@@ -51,13 +56,23 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {
return self;
}
void
neural_free(Neural_Network *self) {
if (self) {
for (int i = 0; i < self->layer_count; i++) {
nl_free(self->layers[i]);
}
free(self->layers);
}
free(self);
}
void
neural_randomize(Neural_Network *self) {
FILE *f;
Neural_Layer *nl;
uint64_t *rand_vals;
f = fopen("/dev/urandom", "r");
for (int i = 0; i < self->layer_count; i++) {
@@ -67,7 +82,7 @@ neural_randomize(Neural_Network *self) {
fread(rand_vals, sizeof(uint64_t),
nl->layer_size_next, f);
for (int k = 0; k < nl->layer_size_next; k++) {
nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX;
nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX / nl->layer_size;
}
free(rand_vals);
}
@@ -105,6 +120,7 @@ neural_loadData(Neural_Network *self, const char *filename) {
return NULL;
break;
}
read_cursor++;
}
return retval;
}
@@ -118,10 +134,10 @@ neural_process(Neural_Network *self, float *input) {
for (int i = 0; i < self->layers[0]->layer_size; i++) {
nl->neurons[i].value = input[i];
}
neural_vector = tensor_new(1, nl->layer_size);
for (int i = 0; i < self->layer_count; i++) {
neural_vector = tensor_new(nl->layer_size, 1, 0);
nl = self->layers[i];
synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size);
synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size, 0);
for (int j = 0; j < nl->layer_size; j++) {
neural_vector->data[j] = nl->neurons[j].value;
for (int k = 0; k < nl->layer_size_next; k++) {
@@ -130,9 +146,16 @@ neural_process(Neural_Network *self, float *input) {
}
temp_buffer = tensor_multip(synapse_matrix, neural_vector);
neural_vector = temp_buffer;
if (nl->layer_size_next) {
Neural_Layer *nl_next = self->layers[i+1];
for (int j = 0; j < nl_next->layer_size; j++) {
nl_next->neurons[j].value = neural_vector->data[j];
}
}
tensor_free(neural_vector);
tensor_free(synapse_matrix);
neural_vector = temp_buffer;
}
retval = malloc(nl->layer_size * sizeof(float));
@@ -143,48 +166,81 @@ neural_process(Neural_Network *self, float *input) {
return retval;
}
// These two will be merged into one once I have
// enough patience to create more dynamic objects.
static void *
neural_backprop_up(Neural_Network *self, size_t neuron, size_t layer) {
return NULL;
neural_backpropagation(Neural_Network *self, int neuron, int layer, float ratio) {
Neural_Layer *nl;
Neural_Data *nd;
float *ratios;
int *neurons;
float *synapses;
for (int i = layer-1; i >= 0; i--) {
nl = self->layers[i];
for (int j = 0; j < nl->layer_size; j++) {
synapses = nl->neurons[j].synapses;
for (int k = 0; k < nl->layer_size_next; k++) {
synapses[k] = 0;
}
}
}
static void *
neural_backprop_down(Neural_Network *self, size_t neuron, size_t layer) {
return NULL;
}
int
neural_train(Neural_Network *self,
const char *input_path,
const float *expected_result) {
Neural_Data *input_data; // What the neural network received
Neural_Data *result_data; // What the neural network computed
float backprop_ratio;
input_data = neural_getData(self, 0);
result_data = neural_getData(self, self->layer_count-1);
for (int i = self->layer_count-1; i >= 0; i--) {
Neural_Layer *nl = self->layers[i];
result_data = neural_getData(self, i);
for (int j = nl->layer_size-1; j >= 0; j--) {
backprop_ratio = nl->neurons[j].value / expected_result[j];
neural_backpropagation(self, j, i, backprop_ratio);
}
}
return 0;
}
Neural_Data *
neural_data_new(int layer_size, int layer_size_next) {
Neural_Data *self;
self = calloc(1, sizeof(Neural_Data));
self->neural_vector = malloc(layer_size * sizeof(float));
self->vect_len = layer_size;
if (layer_size_next) {
self->synapse_matrix = malloc(layer_size * layer_size_next
* sizeof(float));
self->mat_len = layer_size_next;
}
return self;
}
Neural_Data *
neural_getData(Neural_Network *self, size_t layer) {
Neural_Layer *nl;
Neural_Data *retval;
retval = malloc(1 * sizeof(Neural_Data));
nl = self->layers[layer];
retval->neural_vector = malloc(nl->layer_size * sizeof(float));
retval = neural_data_new(nl->layer_size, nl->layer_size_next);
retval->vect_len = nl->layer_size;
if (!nl->layer_size_next) {
retval->synapse_matrix = NULL;
retval->mat_len = 0;
}
else {
retval->synapse_matrix = malloc(nl->layer_size * nl->layer_size_next
* sizeof(float));
for (int i = 0; i < nl->layer_size; i++) {
for (int j = 0; j < nl->layer_size_next; j++) {
retval->synapse_matrix[i*j+i] = nl->neurons[i].synapses[j];
@@ -205,7 +261,7 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
for (int j = 0; j < nn->layer_count; j++) {
Neural_Layer *nl = nn->layers[j];
for (int i = 0; i < nl->layer_size; i++) {
unsigned int brightness;
float brightness;
for (int k = 0; k < nl->layer_size_next; k++) {
model = model_line((-.90)
+ ((GLfloat)2 * i * .90/(nl->layer_size-1)),
@@ -219,7 +275,7 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
.001 // girth
);
brightness = nl->neurons[i].synapses[k] * 255;
brightness = nl->neurons[i].synapses[k];
if (brightness) {
model_colorXYZ(model, brightness, 0, 0);
}
@@ -227,16 +283,19 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
}
model = model_circle(0, (GLfloat)1/64);
brightness = nl->neurons[i].value <= 1.0 ? nl->neurons[i].value : 255;
brightness = nl->neurons[i].value <= 1.0 ?
nl->neurons[i].value : 1.0;
model_colorXYZ(model, 0, brightness, 0);
Tensor *translation_matrix = tensor_new(4, 4);
Tensor *aspectRatio_matrix = tensor_new(4, 4);
Tensor *translation_matrix = tensor_new(4, 4, 1);
Tensor *aspectRatio_matrix = tensor_new(4, 4, 1);
aspectRatio_matrix->data[0] = (GLfloat)9/16;
translation_matrix->data[3] = (((GLfloat)-1*16/9)*.90)
+ ((GLfloat)1/(nl->layer_size-1)*2 * i * (((GLfloat)16/9))*.90);
+ ((GLfloat)1/(nl->layer_size-1)
* 2 * i * (((GLfloat)16/9))*.90);
translation_matrix->data[7] = .90 - ((GLfloat)1/(nn->layer_count)*2 * j *.90);
translation_matrix->data[7] = .90 - ((GLfloat)1/(nn->layer_count)
* 2 * j *.90);
model->transformations[0] = translation_matrix;
model->transformations[1] = aspectRatio_matrix;
@@ -250,3 +309,122 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
return 0;
}
static char*
indented_line(char *str, const char *line, int *indent) {
for (int m = 0; m < *indent; m++)
str = strcat(str, " ");
str = strcat(str, line);
return str;
}
static char*
indented_tag(char *str, const char *tag, int *indent) {
if (tag[1] == '/') {
*indent -= 4;
}
indented_line(str, tag, indent);
if (tag[1] != '/') {
*indent += 4;
}
return str;
}
// TODO
/* This XML implementation has potential bugs and has not
* been checked very thoroughly, fix, please.
*/
char *
neural_getXML(Neural_Network *nn) {
char *retval;
const char *to_write;
int volume = 0;
int indent = 0;
retval = malloc(0xff * sizeof(char));
to_write = "<?xml version=\"1.0\"?>\n\n";
retval = strcpy(retval, to_write);
to_write = "<Network>\n";
retval = indented_tag(retval, to_write, &indent);
for (int i = 0; i < nn->layer_count; i++) {
Neural_Layer *nl;
Neural_Data *nd;
char *line_prep;
nl = nn->layers[i];
nd = neural_getData(nn, i);
retval = realloc(retval, strlen(retval)
+ (nl->layer_size * 32 * nl->layer_size_next)// Matrix
+ (nl->layer_size * 32) // Vector
+ 0x3ff * nl->layer_size // Expected tag garbage.
+ indent); // Space waster
to_write = "<Layer>\n";
retval = indented_tag(retval, to_write, &indent);
to_write = "<Synapse_Matrix>\n";
retval = indented_tag(retval, to_write, &indent);
for (int j = 0; j < nd->mat_len; j++) {
char number_buffer[32];
line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
+ (nl->layer_size * 32));
*line_prep = '\0';
line_prep = strcat(line_prep, "[ ");
for (int k = 0; k < nd->vect_len; k++) {
strfromf(number_buffer, 32, "%.2f ", nd->synapse_matrix[k+j*nd->mat_len]);
line_prep = strcat(line_prep, number_buffer);
if (k < nd->vect_len - 1) {
line_prep = strcat(line_prep, ", ");
}
}
line_prep = strcat(line_prep, " ]\n");
retval = indented_line(retval, line_prep, &indent);
free(line_prep);
}
to_write = "</Synapse_Matrix>\n";
retval = indented_tag(retval, to_write, &indent);
to_write = "<Neural_Vector>\n";
retval = indented_tag(retval, to_write, &indent);
char number_buffer[32];
line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
+ (nl->layer_size * 32));
*line_prep = '\0';
line_prep = strcat(line_prep, "[ ");
for (int k = 0; k < nd->vect_len; k++) {
strfromf(number_buffer, 32, "%.4f", nd->neural_vector[k]);
line_prep = strcat(line_prep, number_buffer);
if (k < nd->vect_len - 1) {
line_prep = strcat(line_prep, ", ");
}
}
line_prep = strcat(line_prep, " ]\n");
retval = indented_line(retval, line_prep, &indent);
free(line_prep);
to_write = "</Neural_Vector>\n";
retval = indented_tag(retval, to_write, &indent);
to_write = "</Layer>\n";
retval = indented_tag(retval, to_write, &indent);
}
to_write = "</Network>\n";
retval = indented_tag(retval, to_write, &indent);
return retval;
}
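For the realloc()/strcat() pattern flagged in the TODO above, one assumed hardening is a length-tracked append, so capacity no longer depends on the size estimates:

#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: grows geometrically and tracks the length,
 * avoiding repeated strcat() walks over an estimated buffer. */
typedef struct {
    char *buf;
    size_t len;
    size_t cap;
} StrBuf;

static int
strbuf_append(StrBuf *sb, const char *s) {
    size_t n = strlen(s);
    if (sb->len + n + 1 > sb->cap) {
        size_t cap = sb->cap ? sb->cap : 256;
        while (cap < sb->len + n + 1) {
            cap *= 2;
        }
        char *p = realloc(sb->buf, cap);
        if (!p) {
            return -1;
        }
        sb->buf = p;
        sb->cap = cap;
    }
    memcpy(sb->buf + sb->len, s, n + 1);
    sb->len += n;
    return 0;
}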

src/tensor.c

@@ -1,7 +1,7 @@
#include "cx.h"
Tensor *
tensor_new(size_t len, size_t width) {
tensor_new(size_t len, size_t width, int is_identity) {
Tensor *mat;
mat = malloc(1 * sizeof(Tensor));
@@ -10,6 +10,10 @@ tensor_new(size_t len, size_t width) {
mat->len = len;
mat->width = width;
if (!is_identity) {
return mat;
}
for (int i = 0; i < len; i++) {
mat->data[i*width+(i % width)] = 1;
}
@@ -24,7 +28,7 @@ tensor_fromVertexBuffer(float *buffer, size_t bufsize) {
mat_width = bufsize;
mat = tensor_new(4, mat_width);
mat = tensor_new(4, mat_width, 0);
for (int i = 0; i < bufsize; i++) {
for (int j = 0; j < 4; j++) {
@@ -44,7 +48,7 @@ tensor_multip(Tensor *mat2, Tensor *mat1) {
Tensor *result;
float dot_prod;
result = tensor_new(mat2->len, mat1->width);
result = tensor_new(mat2->len, mat1->width, 0);
for (int i = 0; i < mat1->width; i++) {