Compare commits

264fcb407ba1430a0eb112b13d708c6f5fda98a9..4b2db87c9e57cc5cfc4f1abf2c51b02ffd283cdc

No commits in common. "264fcb407ba1430a0eb112b13d708c6f5fda98a9" and "4b2db87c9e57cc5cfc4f1abf2c51b02ffd283cdc" have entirely different histories.

14 changed files with 155 additions and 528 deletions

CMakeLists.txt

@@ -1,5 +1,5 @@
 # CMake entry point
-cmake_minimum_required(VERSION 3.31.0)
+cmake_minimum_required (VERSION 3.30.5)
 project(CX C)
 cmake_policy(SET CMP0072 NEW)
@@ -20,7 +20,7 @@ set(ALL_LIBS
     pthread
 )
-set(CMAKE_C_FLAGS "-O0 -ggdb -Wall -std=gnu99 -Wpedantic")
+set(CMAKE_C_FLAGS "-O0 -ggdb -Wall")
 add_definitions(
     -DTW_STATIC
@@ -34,7 +34,6 @@ add_executable(
     cx
     src/main.c
     src/cx.c
-    src/cx_thread.c
    src/tensor.c
     src/model.c
     src/shader.c

(deleted Markdown file with issue notes)

@@ -1,19 +0,0 @@
-# Issues
-
-## Error handling
-
-Some errors are being handled, some aren't, some are being handled
-partially and some errors (and/or their handling) might break the program
-before a proper return. some return values of library functions are being
-ignored altogether.
-
-## Context handling
-
-Context handling in it's current form relies on all
-context types to have a free() function stored
-on a specific place in the data structure.
-This will most likely result in a segfault anytime
-a new structure is being used that is not properly
-aligned.
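
The "Context handling" note describes a hand-rolled destructor convention: generic code receives a context as a void * and calls a free() function pointer that every context type is expected to keep at the same offset (both CX_GL_CTX and CX_NN_CTX in this diff put it first). A minimal sketch of why that only holds while the layout contract is respected; the names here are illustrative, not from the repository:

/* Illustrative only: the layout contract behind the note above. */
#include <stdlib.h>

typedef struct {
    void (*free)(void *self);   /* contract: destructor is the FIRST member */
    int payload;
} Ctx_Good;

typedef struct {
    int payload;                /* contract broken: offset 0 holds an int */
    void (*free)(void *self);
} Ctx_Bad;

static void ctx_good_free(void *self) { free(self); }

/* The generic call the note warns about: it blindly reads a function
 * pointer from offset 0 of whatever it is handed. */
static void ctx_free_any(void *ctx) {
    (*(void (**)(void *))ctx)(ctx);
}

int main(void) {
    Ctx_Good *ok = malloc(sizeof(Ctx_Good));
    ok->free = ctx_good_free;
    ctx_free_any(ok);   /* fine: layout matches the assumption */
    /* Handing ctx_free_any a Ctx_Bad would call through an int
     * reinterpreted as a function pointer: undefined behavior,
     * in practice usually a segfault. */
    return 0;
}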

cx.h

@@ -1,8 +1,6 @@
 #ifndef CX_H
 #define CX_H
-#define __STDC_WANT_IEC_60559_BFP_EXT__
 
 // Include standard headers
 #include <stdio.h>
 #include <stdlib.h>
@@ -12,8 +10,6 @@
 #include <unistd.h>
 #include <stdint.h>
 #include <pthread.h>
-#include <inttypes.h>
-#include <string.h>
 
 // Include GLEW
 #include <GL/glew.h>
@@ -23,51 +19,43 @@
 #include <GLFW/glfw3.h>
 
 // Include project headers
-#include <cx_thread.h>
 #include <tensor.h>
 #include <model.h>
 #include <tensor.h>
-#include <neural.h>
 #include <shader.h>
+#include <neural.h>
 
 // Declare common data structures.
-typedef struct _cx_gl_ctx {
-    void (*free)(void *self);
-    uint8_t master_lock;
-    uint8_t *worker_locks;
-    CX_ThreadGroup **workers;
+typedef struct _cx_thrd {
+    pthread_t thread;
+    void *ctx; // Arbitrary thread context
+} CX_Thread;
+
+typedef struct _cx_thrgr {
+    CX_Thread *group_manager;
+    CX_Thread **workers;
+    size_t worker_count;
+    size_t worker_size;
+} CX_ThreadGroup;
+
+typedef struct _cx_ctx {
     GLFWwindow *window;
-    ModelRegistry *mr;
+    Neural_Network *nn;
+    CX_ThreadGroup **threads;
     GLuint *VertexArrayIDs;
     size_t VertexArray_count;
     size_t VertexArray_size;
     GLuint *programIDs;
     size_t ProgramID_count;
     size_t ProgramID_size;
-} CX_GL_CTX;
-
-typedef struct _cx_nn_ctx {
-    void (*free)(void *self);
-    uint8_t master_lock;
-    uint8_t *worker_locks;
-    CX_ThreadGroup **workers;
-    Neural_Network *nn;
-    float *input_buffer;
-    float *output_buffer;
-} CX_NN_CTX;
-
-typedef struct _cx_ctx {
-    CX_ThreadGroup **threads;
-    CX_GL_CTX *gl_ctx;
-    CX_NN_CTX *nn_ctx;
 } CX_Context;
 
 // Declare functions
 CX_Context *cx_context_new(void);
-int cx_glinit(CX_GL_CTX **);
+int cx_glinit(GLFWwindow **);
 int cx_nninit(Neural_Network **);
 int cx_init(CX_Context **);

cx_thread.h (deleted)

@@ -1,20 +0,0 @@
-#ifndef CX_THREAD_H
-#define CX_THREAD_H
-
-typedef struct _cx_thrd {
-    pthread_t thread;
-    void *ctx; // Arbitrary thread context
-} CX_Thread;
-
-typedef struct _cx_thrgr {
-    CX_Thread *group_manager;
-    CX_Thread **workers;
-    size_t worker_count;
-    size_t worker_size;
-} CX_ThreadGroup;
-
-CX_ThreadGroup *cx_threadGroup_new(void *(*)(void *), void *);
-void cx_threadGroup_free(CX_ThreadGroup *);
-
-#endif

model.h

@@ -22,7 +22,7 @@ int modelRegistry_register(ModelRegistry *, Model *);
 void modelRegistry_free(ModelRegistry *);
 GLfloat * model_applyTransformations(Model *);
 void model_colorFromPosition(Model *);
-void model_colorXYZ(Model *, float R, float G, float B);
+void model_colorXYZ(Model *, int R, int G, int B);
 void model_colorRed(Model *);
 void model_colorGreen(Model *);
 void model_colorBlue(Model *);

neural.h

@@ -26,13 +26,10 @@ typedef struct _neural_data {
 } Neural_Data;
 
 Neural_Network *neural_new(size_t, size_t, size_t);
-void neural_free(Neural_Network *);
 void neural_randomize(Neural_Network *);
-float *neural_loadData(Neural_Network *, const char *);
 float *neural_process(Neural_Network *, float *);
 Neural_Data *neural_getData(Neural_Network *, size_t);
 int neural_getMesh(Neural_Network *, ModelRegistry *);
-char *neural_getXML(Neural_Network *);
 
 #endif

tensor.h

@@ -7,7 +7,7 @@ typedef struct _tensor {
     size_t width;
 } Tensor;
 
-Tensor *tensor_new(size_t, size_t, int);
+Tensor *tensor_new(size_t, size_t);
 Tensor *tensor_fromVertexBuffer(float *, size_t);

(fragment shader; path not shown)

@@ -3,7 +3,7 @@
 in vec3 colorF;
 out vec3 color;
 
 void main() {
-    color = colorF;
+    color = colorF;
 }

src/cx.c (264 changed lines)

@@ -1,5 +1,41 @@
 #include <cx.h>
 
+static CX_Thread *
+cx_thread_new(void *(*target)(void *),
+              void *ctx) {
+    CX_Thread *self;
+    int err;
+
+    self = malloc(sizeof(CX_Thread));
+    if (!self) {
+        goto err;
+    }
+    err = pthread_create(&self->thread, NULL, target, ctx);
+    if (err) {
+        goto err;
+    }
+    self->ctx = ctx;
+
+err:
+    free(self);
+    return NULL;
+}
+
+static CX_ThreadGroup *
+cx_threadGroup_new(void *(*target)(void *),
+                   void *ctx) {
+    CX_ThreadGroup *self;
+
+    self = malloc(sizeof(CX_ThreadGroup));
+    self->group_manager = cx_thread_new(target, ctx);
+    self->workers = malloc(8 * sizeof(CX_Thread *));
+    self->worker_count = 0;
+    self->worker_size = 8;
+
+    return self;
+}
+
 static void
 cx_glBindBuffer(GLfloat *render_buffer, GLuint buffer_address,
                 GLuint gl_index, GLint member_size, GLsizeiptr bufsize) {
@@ -80,58 +116,13 @@ cx_loadShaders(GLuint *VertexArrayID, GLuint *programID) {
     return 0;
 }
 
-void
-gl_ctx_free(void *self) {
-    CX_GL_CTX *gl_ctx;
-
-    gl_ctx = self;
-    if (gl_ctx) {
-        free(gl_ctx->VertexArrayIDs);
-        free(gl_ctx->programIDs);
-        modelRegistry_free(gl_ctx->mr);
-    }
-    free(gl_ctx);
-}
-
-void
-nn_ctx_free(void *self) {
-    CX_NN_CTX *nn_ctx;
-
-    nn_ctx = self;
-    if (nn_ctx) {
-        free(nn_ctx->input_buffer);
-        free(nn_ctx->output_buffer);
-        neural_free(nn_ctx->nn);
-    }
-    free(nn_ctx);
-}
-
 int
-cx_glinit(CX_GL_CTX **gl_ctx) {
-    // Initialize OpenGL context
-    (*gl_ctx)->VertexArrayIDs = calloc(1, sizeof(GLuint));
-    if (!(*gl_ctx)->VertexArrayIDs) {
-        goto err;
-    }
-    (*gl_ctx)->VertexArray_count = 0;
-    (*gl_ctx)->VertexArray_size = 1;
-    (*gl_ctx)->programIDs = calloc(1, sizeof(GLuint));
-    if (!(*gl_ctx)->programIDs) {
-        goto err;
-    }
-    (*gl_ctx)->ProgramID_count = 0;
-    (*gl_ctx)->ProgramID_size = 1;
-    (*gl_ctx)->free = &gl_ctx_free;
-
+cx_glinit(GLFWwindow **window) {
     // Initialise GLFW
     printf("Initializing OpenGL.\n");
     if(!glfwInit()) {
         fprintf(stderr, "Failed to initialize GLFW\n");
-        goto err;
+        return -1;
     }
 
     glfwWindowHint(GLFW_SAMPLES, 4);
@@ -143,33 +134,29 @@ cx_glinit(CX_GL_CTX **gl_ctx) {
     glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
 
     // Open a window and create its OpenGL context
-    (*gl_ctx)->window = glfwCreateWindow(1280, 720, "C-X", NULL, NULL);
-    if ((*gl_ctx)->window == NULL) {
+    *window = glfwCreateWindow(1280, 720, "C-X", NULL, NULL);
+    if (*window == NULL) {
         fprintf(stderr, "Failed to open GLFW window.\n");
         glfwTerminate();
-        goto err;
+        return -1;
     }
-    printf("Window created.\n");
-    glfwMakeContextCurrent((*gl_ctx)->window);
+    glfwMakeContextCurrent(*window);
 
     // Initialize GLEW
     if (glewInit() != GLEW_OK) {
         fprintf(stderr, "Failed to initialize GLEW\n");
         glfwTerminate();
-        goto err;
+        return -1;
     }
 
     // Ensure we can capture the escape key being pressed below
-    glfwSetInputMode((*gl_ctx)->window, GLFW_STICKY_KEYS, GL_TRUE);
+    glfwSetInputMode(*window, GLFW_STICKY_KEYS, GL_TRUE);
 
     // Dark grey background
     glClearColor(0.15f, 0.15f, 0.15f, 0.0f);
 
     return 0;
-
-err:
-    return -1;
 }
@@ -188,138 +175,79 @@ cx_nninit(Neural_Network **nn) {
     return 0;
 }
 
-static void
-master_thread(void *ctx) {
-
-}
-
 int
 cx_init(CX_Context **cx_ctx) {
-    CX_GL_CTX *gl_ctx;
-    CX_NN_CTX *nn_ctx;
-
-    printf("Initializing CX.\n");
-    nn_ctx = calloc(1, sizeof(CX_NN_CTX));
-    nn_ctx->free = &nn_ctx_free;
+    printf("Initializing CX.");
 
     *cx_ctx = calloc(1, sizeof(CX_Context));
-    gl_ctx = calloc(1, sizeof(CX_GL_CTX));
+    (*cx_ctx)->VertexArrayIDs = calloc(1, sizeof(GLuint));
+    (*cx_ctx)->VertexArray_count = 0;
+    (*cx_ctx)->VertexArray_size = 1;
+    (*cx_ctx)->programIDs = calloc(1, sizeof(GLuint));
+    (*cx_ctx)->ProgramID_count = 0;
+    (*cx_ctx)->ProgramID_size = 1;
+    (*cx_ctx)->threads = calloc(1, sizeof(CX_ThreadGroup));
 
-    (*cx_ctx)->gl_ctx = gl_ctx;
-    (*cx_ctx)->nn_ctx = nn_ctx;
-
-    (*cx_ctx)->threads = calloc(1, sizeof(CX_ThreadGroup *));
-    if (!(*cx_ctx)->threads) {
-        goto err;
+    if (cx_glinit(&(*cx_ctx)->window)) {
+        return -1;
+    }
+    if (cx_nninit(&(*cx_ctx)->nn)) {
+        return -1;
     }
 
     return 0;
-
-err:
-    if ((*cx_ctx)->gl_ctx) {
-        free((*cx_ctx)->gl_ctx->VertexArrayIDs);
-        free((*cx_ctx)->gl_ctx->programIDs);
-        free((*cx_ctx)->threads);
-    }
-    free(*cx_ctx);
-    return -1;
 }
 
 static int
-cx_glrun(CX_GL_CTX *ctx) {
+cx_glrun() {
+    return 0;
+}
+
+static int
+cx_nnrun(Neural_Network *nn) {
+    // Establish a neural interface.
+    float *input_buffer = malloc(64*sizeof(float));
+    float *output_buffer;
+
+    output_buffer = neural_process(nn, input_buffer);
+
+    return 0;
+}
+
+int
+cx_run(CX_Context *cx_ctx) {
+    ModelRegistry *mr;
+
+    if (cx_loadShaders(cx_ctx->VertexArrayIDs, cx_ctx->programIDs)) {
+        return -1;
+    }
+
+    // Establish a model registry
+    mr = modelRegistry_new();
+    // Fill the model registry with mesh models
+    neural_getMesh(cx_ctx->nn, mr);
 
     // Remainder from cursor experiments, might be useful later
     double xpos, ypos;
-    glfwGetCursorPos(ctx->window, &xpos, &ypos);
+    glfwGetCursorPos(cx_ctx->window, &xpos, &ypos);
 
     do {
-        // Skip render step if context is locked.
-        if (!ctx->master_lock) {
-            cx_glrender(ctx->window, ctx->programIDs[0], ctx->mr);
-        }
+        cx_glrender(cx_ctx->window, cx_ctx->programIDs[0], mr);
         usleep(1000000/60);
         // Check if the ESC key was pressed or the window was closed
-    } while(glfwGetKey(ctx->window, GLFW_KEY_ESCAPE) != GLFW_PRESS
-            && !glfwWindowShouldClose(ctx->window));
+    } while(glfwGetKey(cx_ctx->window, GLFW_KEY_ESCAPE) != GLFW_PRESS
+            && !glfwWindowShouldClose(cx_ctx->window));
 
     // Close OpenGL window and terminate GLFW
     glfwTerminate();
 
-    return 0;
-}
-
-static int
-cx_nnrun(CX_Thread *self) {
-    // Establish a neural interface.
-    float *output_buffer;
-    CX_NN_CTX *ctx = self->ctx;
-
-    output_buffer = neural_process(ctx->nn, ctx->input_buffer);
-    ctx->output_buffer = output_buffer;
-
-    return 0;
-}
-
-static void *
-cx_glthread(void *self) {
-    CX_Thread *self_t = self;
-    CX_GL_CTX *gl_ctx = self_t->ctx;
-
-    cx_glinit(&gl_ctx);
-    if (cx_loadShaders(gl_ctx->VertexArrayIDs, gl_ctx->programIDs)) {
-        return NULL;
-    }
-    cx_glrun(gl_ctx);
-
-    return NULL;
-}
-
-static void *
-cx_nnthread(void *self) {
-    CX_Thread *self_t = self;
-    CX_NN_CTX *nn_ctx = self_t->ctx;
-    float *input, *output;
-    char *export;
-
-    cx_nninit(&nn_ctx->nn);
-    input = neural_loadData(nn_ctx->nn, "../training_data/0");
-    output = neural_process(nn_ctx->nn, input);
-    export = neural_getXML(nn_ctx->nn);
-
-    return export;
-}
-
-int
-cx_run(CX_Context *ctx) {
-    CX_ThreadGroup *tg[2];
-    void *neural_xml;
-
-    // Establish a model registry
-    ctx->gl_ctx->mr = modelRegistry_new();
-    ctx->gl_ctx->master_lock = 1;
-
-    tg[0] = cx_threadGroup_new(&cx_glthread, ctx->gl_ctx);
-    tg[1] = cx_threadGroup_new(&cx_nnthread, ctx->nn_ctx);
-
-    pthread_join(tg[1]->group_manager->thread, &neural_xml);
-    ctx->gl_ctx->master_lock = 0;
-    neural_getMesh(ctx->nn_ctx->nn, ctx->gl_ctx->mr);
-    pthread_join(tg[0]->group_manager->thread, NULL);
-
-    cx_threadGroup_free(tg[0]);
-    cx_threadGroup_free(tg[1]);
-    free(ctx->threads);
-    free(ctx);
-    free(neural_xml);
+    modelRegistry_free(mr);
 
     return 0;
 }
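
One thing worth flagging in the hunk above: the right-hand cx_thread_new falls through into its err: label even on success, so every created thread handle is immediately freed and NULL is returned (and cx_threadGroup_new then stores that NULL as group_manager). The deleted src/cx_thread.c below shows the intended control flow; a minimal corrected sketch of the same function:

static CX_Thread *
cx_thread_new(void *(*target)(void *), void *ctx) {
    CX_Thread *self;

    self = malloc(sizeof(CX_Thread));
    if (!self) {
        goto err;
    }
    self->ctx = ctx;    /* set before the new thread can touch it */
    if (pthread_create(&self->thread, NULL, target, self)) {
        goto err;
    }

    return self;        /* the success path must not reach err: */

err:
    free(self);         /* free(NULL) is a no-op, so this is safe */
    return NULL;
}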

src/cx_thread.c (deleted)

@@ -1,64 +0,0 @@
-#include <cx.h>
-
-CX_Thread *
-cx_thread_new(void *(*target)(void *),
-              void *ctx) {
-    CX_Thread *self;
-    int err;
-
-    self = malloc(sizeof(CX_Thread));
-    if (!self) {
-        goto err;
-    }
-    self->ctx = ctx;
-    err = pthread_create(&self->thread, NULL, target, self);
-    if (err) {
-        goto err;
-    }
-
-    return self;
-
-err:
-    free(self);
-    return NULL;
-}
-
-void
-cx_thread_free(CX_Thread *self) {
-    if (self) {
-        /* TODO */
-        /* This is naive in its current form and will shatter
-         * sooner or later.
-         * Fix the context structures so that this call
-         * is guaranteed not to touch invalid memory.
-         */
-        ((CX_GL_CTX *)self->ctx)->free(self->ctx);
-    }
-    free(self);
-}
-
-CX_ThreadGroup *
-cx_threadGroup_new(void *(*target)(void *),
-                   void *ctx) {
-    CX_ThreadGroup *self;
-
-    self = malloc(sizeof(CX_ThreadGroup));
-    self->workers = malloc(8 * sizeof(CX_Thread *));
-    self->worker_count = 0;
-    self->worker_size = 8;
-    self->group_manager = cx_thread_new(target, ctx);
-
-    return self;
-}
-
-void
-cx_threadGroup_free(CX_ThreadGroup *self) {
-    if (self) {
-        cx_thread_free(self->group_manager);
-        free(self->workers);
-    }
-    free(self);
-}
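
The TODO in cx_thread_free points at the same hazard the deleted issues file describes: the cast to CX_GL_CTX * is only safe while every context type happens to share that layout. A common fix, sketched here as a suggestion rather than anything present in either commit, is to factor the destructor into a base struct that each context embeds as its first member:

/* Hypothetical refactor, not code from either commit. */
typedef struct _cx_ctx_base {
    void (*free)(void *self);
} CX_CtxBase;

typedef struct _cx_gl_ctx {
    CX_CtxBase base;            /* must remain the first member */
    /* ... GL-specific fields ... */
} CX_GL_CTX;

void
cx_thread_free(CX_Thread *self) {
    if (self && self->ctx) {
        /* Well-defined for any context that embeds CX_CtxBase first. */
        ((CX_CtxBase *)self->ctx)->free(self->ctx);
    }
    free(self);
}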

src/main.c

@@ -4,8 +4,10 @@ int
 main(void) {
     // CX context (Window, neural network, threads.)
     CX_Context *cx_ctx;
     int retval;
 
     if (cx_init(&cx_ctx)) {
         return -1;
     }

src/model.c

@@ -92,7 +92,6 @@ model_applyTransformations(Model *self) {
     if (!self->transformation_count) {
         retval = malloc(self->bufsize * 4 * sizeof(GLfloat));
         memcpy(retval, self->object, self->bufsize * 4 * sizeof(GLfloat));
-        tensor_free(temp_buffer[1]);
         return retval;
     }
@@ -110,7 +109,6 @@ model_applyTransformations(Model *self) {
                 ->data[j*temp_buffer[(i+1)%2]->width+k];
         }
     }
-    tensor_free(temp_buffer[(i+1)%2]);
 
     return retval;
 }
@@ -129,7 +127,7 @@ model_colorFromPosition(Model *self) {
     }
 }
 
-void model_colorXYZ(Model *self, float R, float G, float B) {
+void model_colorXYZ(Model *self, int R, int G, int B) {
     for (int i = 0; i < self->bufsize; i++) {
         for (int j = 0; j < 4; j++) {
             switch(j) {

src/neural.c

@@ -17,12 +17,7 @@ nl_new(size_t layer_size, size_t layer_size_next) {
 static void
 nl_free(Neural_Layer *self) {
-    if (self) {
-        for (int i = 0; i < self->layer_size; i++) {
-            free(self->neurons[i].synapses);
-        }
-        free(self->neurons);
-    }
+    free(self->neurons);
     free(self);
 }
@@ -44,11 +39,11 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {
     // Calculate sizes of individual layers and allocate them.
     for (int i = 0; i < layer_count; i++) {
-        self->layers[i] = nl_new(input_size + (layer_diff * i
-                                 / ((ssize_t)layer_count-1)),
+        self->layers[i] = nl_new(input_size
+                                 + (layer_diff * i / ((ssize_t)layer_count-1)),
                                  i < (layer_count-1) ?
                                  (input_size + (layer_diff * (i+1)
                                  / ((ssize_t)layer_count-1)))
                                  : 0);
     }
@@ -56,23 +51,13 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {
     return self;
 }
 
-void
-neural_free(Neural_Network *self) {
-    if (self) {
-        for (int i = 0; i < self->layer_count; i++) {
-            nl_free(self->layers[i]);
-        }
-        free(self->layers);
-    }
-    free(self);
-}
-
 void
 neural_randomize(Neural_Network *self) {
     FILE *f;
     Neural_Layer *nl;
     uint64_t *rand_vals;
 
     f = fopen("/dev/urandom", "r");
     for (int i = 0; i < self->layer_count; i++) {
@@ -82,7 +67,7 @@ neural_randomize(Neural_Network *self) {
             fread(rand_vals, sizeof(uint64_t),
                   nl->layer_size_next, f);
             for (int k = 0; k < nl->layer_size_next; k++) {
-                nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX / nl->layer_size;
+                nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX;
             }
             free(rand_vals);
         }
@@ -120,7 +105,6 @@ neural_loadData(Neural_Network *self, const char *filename) {
             return NULL;
             break;
         }
-        read_cursor++;
     }
 
     return retval;
 }
@@ -134,10 +118,10 @@ neural_process(Neural_Network *self, float *input) {
     for (int i = 0; i < self->layers[0]->layer_size; i++) {
         nl->neurons[i].value = input[i];
     }
+    neural_vector = tensor_new(1, nl->layer_size);
 
     for (int i = 0; i < self->layer_count; i++) {
-        neural_vector = tensor_new(nl->layer_size, 1, 0);
         nl = self->layers[i];
-        synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size, 0);
+        synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size);
         for (int j = 0; j < nl->layer_size; j++) {
             neural_vector->data[j] = nl->neurons[j].value;
             for (int k = 0; k < nl->layer_size_next; k++) {
@@ -146,16 +130,9 @@ neural_process(Neural_Network *self, float *input) {
         }
         temp_buffer = tensor_multip(synapse_matrix, neural_vector);
-        neural_vector = temp_buffer;
-        if (nl->layer_size_next) {
-            Neural_Layer *nl_next = self->layers[i+1];
-            for (int j = 0; j < nl_next->layer_size; j++) {
-                nl_next->neurons[j].value = neural_vector->data[j];
-            }
-        }
         tensor_free(neural_vector);
         tensor_free(synapse_matrix);
+        neural_vector = temp_buffer;
     }
 
     retval = malloc(nl->layer_size * sizeof(float));
@@ -166,81 +143,48 @@ neural_process(Neural_Network *self, float *input) {
     return retval;
 }
 
+// These two will be merged into one once I have
+// enough patience to create more dynamic objects.
 static void *
-neural_backpropagation(Neural_Network *self, int neuron, int layer, float ratio) {
-    Neural_Layer *nl;
-    Neural_Data *nd;
-    float *ratios;
-    int *neurons;
-    float *synapses;
-
-    for (int i = layer-1; i >= 0; i--) {
-        nl = self->layers[i];
-        for (int j = 0; j < nl->layer_size; j++) {
-            synapses = nl->neurons[j].synapses;
-            for (int k = 0; k < nl->layer_size_next; i++) {
-                synapses[k] = 0;
-            }
-        }
-    }
-
+neural_backprop_up(Neural_Network *self, size_t neuron, size_t layer) {
+    return NULL;
+}
+
+static void *
+neural_backprop_down(Neural_Network *self, size_t neuron, size_t layer) {
     return NULL;
 }
 
 int
 neural_train(Neural_Network *self,
-             const char *input_path,
             const float *expected_result) {
+    Neural_Data *input_data; // What the neural network received
     Neural_Data *result_data; // What the neural network computed
-    float backprop_ratio;
 
-    for (int i = self->layer_count-1; i >= 0; i--) {
-        Neural_Layer *nl = self->layers[i];
-        result_data = neural_getData(self, i);
-        for (int j = nl->layer_size-1; j >= 0; j--) {
-            backprop_ratio = nl->neurons[i].value / expected_result[i];
-            neural_backpropagation(self, j, i, backprop_ratio);
-        }
-    }
+    input_data = neural_getData(self, 0);
+    result_data = neural_getData(self, self->layer_count-1);
 
     return 0;
 }
 
-Neural_Data *
-neural_data_new(int layer_size, int layer_size_next) {
-    Neural_Data *self;
-
-    self = calloc(1, sizeof(Neural_Data));
-    self->neural_vector = malloc(layer_size * sizeof(float));
-    self->vect_len = layer_size;
-    if (layer_size_next) {
-        self->synapse_matrix = malloc(layer_size * layer_size_next
-                                      * sizeof(float));
-        self->mat_len = layer_size_next;
-    }
-
-    return self;
-}
-
 Neural_Data *
 neural_getData(Neural_Network *self, size_t layer) {
     Neural_Layer *nl;
     Neural_Data *retval;
 
+    retval = malloc(1 * sizeof(Neural_Data));
     nl = self->layers[layer];
-    retval = neural_data_new(nl->layer_size, nl->layer_size_next);
+    retval->neural_vector = malloc(nl->layer_size * sizeof(float));
     retval->vect_len = nl->layer_size;
     if (!nl->layer_size_next) {
         retval->synapse_matrix = NULL;
         retval->mat_len = 0;
     }
     else {
+        retval->synapse_matrix = malloc(nl->layer_size * nl->layer_size_next
+                                        * sizeof(float));
        for (int i = 0; i < nl->layer_size; i++) {
             for (int j = 0; j < nl->layer_size_next; j++) {
                 retval->synapse_matrix[i*j+i] = nl->neurons[i].synapses[j];
@@ -261,7 +205,7 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
     for (int j = 0; j < nn->layer_count; j++) {
         Neural_Layer *nl = nn->layers[j];
         for (int i = 0; i < nl->layer_size; i++) {
-            float brightness;
+            unsigned int brightness;
             for (int k = 0; k < nl->layer_size_next; k++) {
                 model = model_line((-.90)
                                    + ((GLfloat)2 * i * .90/(nl->layer_size-1)),
@@ -275,7 +219,7 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
                                    .001 // girth
                                    );
-                brightness = nl->neurons[i].synapses[k];
+                brightness = nl->neurons[i].synapses[k] * 255;
                 if (brightness) {
                     model_colorXYZ(model, brightness, 0, 0);
                 }
@@ -283,19 +227,16 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
             }
             model = model_circle(0, (GLfloat)1/64);
-            brightness = nl->neurons[i].value <= 1.0 ?
-                         nl->neurons[i].value : 1.0;
+            brightness = nl->neurons[i].value <= 1.0 ? nl->neurons[i].value : 255;
             model_colorXYZ(model, 0, brightness, 0);
-            Tensor *translation_matrix = tensor_new(4, 4, 1);
-            Tensor *aspectRatio_matrix = tensor_new(4, 4, 1);
+            Tensor *translation_matrix = tensor_new(4, 4);
+            Tensor *aspectRatio_matrix = tensor_new(4, 4);
             aspectRatio_matrix->data[0] = (GLfloat)9/16;
-            translation_matrix->data[3] = (((GLfloat)-1*16/9)*.90)
-                + ((GLfloat)1/(nl->layer_size-1)
-                * 2 * i * (((GLfloat)16/9))*.90);
-            translation_matrix->data[7] = .90 - ((GLfloat)1/(nn->layer_count)
-                * 2 * j *.90);
+            translation_matrix->data[3] = (((GLfloat)-1*16/9)*.90)
+                + ((GLfloat)1/(nl->layer_size-1)*2 * i * (((GLfloat)16/9))*.90);
+            translation_matrix->data[7] = .90 - ((GLfloat)1/(nn->layer_count)*2 * j *.90);
 
             model->transformations[0] = translation_matrix;
             model->transformations[1] = aspectRatio_matrix;
@@ -309,122 +250,3 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
     return 0;
 }
-
-static char*
-indented_line(char *str, const char *line, int *indent) {
-    for (int m = 0; m < *indent; m++)
-        str = strcat(str, " ");
-    str = strcat(str, line);
-
-    return str;
-}
-
-static char*
-indented_tag(char *str, const char *tag, int *indent) {
-    if (tag[1] == '/') {
-        *indent -= 4;
-    }
-    indented_line(str, tag, indent);
-    if (tag[1] != '/') {
-        *indent += 4;
-    }
-
-    return str;
-}
-
-// TODO
-/* This XML implementation has potential bugs and has not
- * been checked very thoroughly, fix, please.
- */
-char *
-neural_getXML(Neural_Network *nn) {
-    char *retval;
-    const char *to_write;
-    int volume = 0;
-    int indent = 0;
-
-    retval = malloc(0xff * sizeof(char));
-    to_write = "<?xml version=\"1.0\"?>\n\n";
-    retval = strcpy(retval, to_write);
-    to_write = "<Network>\n";
-    retval = indented_tag(retval, to_write, &indent);
-    for (int i = 0; i < nn->layer_count; i++) {
-        Neural_Layer *nl;
-        Neural_Data *nd;
-        char *line_prep;
-
-        nl = nn->layers[i];
-        nd = neural_getData(nn, i);
-        retval = realloc(retval, strlen(retval)
-                         + (nl->layer_size * 32 * nl->layer_size_next) // Matrix
-                         + (nl->layer_size * 32)                       // Vector
-                         + 0x3ff * nl->layer_size // Expected tag garbage.
-                         + indent);               // Space waster
-        to_write = "<Layer>\n";
-        retval = indented_tag(retval, to_write, &indent);
-        to_write = "<Synapse_Matrix>\n";
-        retval = indented_tag(retval, to_write, &indent);
-        for (int j = 0; j < nd->mat_len; j++) {
-            char number_buffer[32];
-
-            line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
-                               + (nl->layer_size * 32));
-            *line_prep = '\0';
-            line_prep = strcat(line_prep, "[ ");
-            for (int k = 0; k < nd->vect_len; k++) {
-                strfromf(number_buffer, 32, "%.2f ", nd->synapse_matrix[k+j*nd->mat_len]);
-                line_prep = strcat(line_prep, number_buffer);
-                if (k < nd->vect_len - 1) {
-                    line_prep = strcat(line_prep, ", ");
-                }
-            }
-            line_prep = strcat(line_prep, " ]\n");
-            retval = indented_line(retval, line_prep, &indent);
-            free(line_prep);
-        }
-        to_write = "</Synapse_Matrix>\n";
-        retval = indented_tag(retval, to_write, &indent);
-        to_write = "<Neural_Vector>\n";
-        retval = indented_tag(retval, to_write, &indent);
-        char number_buffer[32];
-
-        line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
-                           + (nl->layer_size * 32));
-        *line_prep = '\0';
-        line_prep = strcat(line_prep, "[ ");
-        for (int k = 0; k < nd->vect_len; k++) {
-            strfromf(number_buffer, 32, "%.4f", nd->neural_vector[k]);
-            line_prep = strcat(line_prep, number_buffer);
-            if (k < nd->vect_len - 1) {
-                line_prep = strcat(line_prep, ", ");
-            }
-        }
-        line_prep = strcat(line_prep, " ]\n");
-        retval = indented_line(retval, line_prep, &indent);
-        free(line_prep);
-
-        to_write = "</Neural_Vector>\n";
-        retval = indented_tag(retval, to_write, &indent);
-        to_write = "</Layer>\n";
-        retval = indented_tag(retval, to_write, &indent);
-    }
-    to_write = "</Network>\n";
-    retval = indented_tag(retval, to_write, &indent);
-
-    return retval;
-}

src/tensor.c

@@ -1,7 +1,7 @@
 #include "cx.h"
 
 Tensor *
-tensor_new(size_t len, size_t width, int is_identity) {
+tensor_new(size_t len, size_t width) {
     Tensor *mat;
 
     mat = malloc(1 * sizeof(Tensor));
@@ -10,10 +10,6 @@ tensor_new(size_t len, size_t width, int is_identity) {
     mat->len = len;
     mat->width = width;
-    if (!is_identity) {
-        return mat;
-    }
-
     for (int i = 0; i < len; i++) {
         mat->data[i*width+(i % width)] = 1;
     }
@@ -28,8 +24,8 @@ tensor_fromVertexBuffer(float *buffer, size_t bufsize) {
     mat_width = bufsize;
 
-    mat = tensor_new(4, mat_width, 0);
+    mat = tensor_new(4, mat_width);
     for (int i = 0; i < bufsize; i++) {
         for (int j = 0; j < 4; j++) {
             mat->data[j*mat_width+i] = buffer[i*4+j];
@@ -48,7 +44,7 @@ tensor_multip(Tensor *mat2, Tensor *mat1) {
     Tensor *result;
     float dot_prod;
 
-    result = tensor_new(mat2->len, mat1->width, 0);
+    result = tensor_new(mat2->len, mat1->width);
     for (int i = 0; i < mat1->width; i++) {