Compare commits


18 commits

Author SHA1 Message Date
fa6e0c6b2d
Repair data handling
I have no clue how these bugs happened.
I have no clue what the issue was exactly.

All I know is that I multiplied the wrong
indices with the wrong dimensions.

Guess even this is maths sometimes.
2024-12-27 10:44:37 +01:00
264fcb407b
Implement XML export
This allows saving the neural network
once it has been trained.
2024-12-04 16:01:50 +01:00
ef18b57d61
Implement training data loading 2024-11-20 01:00:18 +01:00
9fe8afb68a
Now I'm kinda happy with the threads. 2024-11-14 12:19:53 +01:00
e47a6d26cd
Document current issues. 2024-11-11 19:02:00 +01:00
4169714b24
Readability tweaks 2024-11-11 18:43:38 +01:00
c2b9dfdd29
Memory management tweaks
There are no memory leaks,
yet, I discover, with my steady course,
thru my small valgrind peeks,
that nvidia are a bunch of stupid a-holes.
2024-11-11 17:42:57 +01:00
68d3d4b692
Stop eating my RAM 2024-11-10 23:20:25 +01:00
0ea88cb6bc
Implement threading 2024-11-10 21:28:17 +01:00
3da9e7df5a
Minor tweaks
Error handling in cx_init().
More warnings with -Wpedantic.
2024-11-10 17:40:30 +01:00
afc9aec314
Clean up trailing whitespace 2024-11-10 12:59:35 +01:00
4b2db87c9e
Readability tweaks 2024-11-10 00:48:13 +01:00
fee012f56c
More threading stuff
So far it doesn't do anything,
but soon™ it will.
2024-11-09 21:29:02 +01:00
0b4cc27331
Implement basis for thread management. 2024-11-07 22:07:33 +01:00
594b6ef722
Add a newline at the end of main.c
This is aesthetics stuff.

Wow,
such code,
much prettiness,
very C,
wow.
2024-11-06 19:29:09 +01:00
d2ec11859d
Implement a function for acquiring neural data.
* Some good refactoring
2024-10-29 17:07:08 +01:00
58ae618e69
This one's stupid.
I have no clue what I'm doing at this point
and I have gone insane.

It will be all implemented in the next one,
hopefully.
2024-10-26 21:31:57 +02:00
04bf753f09
Improve neural randomization
Start implementing neural training algorithm.
2024-10-26 09:55:43 +02:00
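
A note on the "Repair data handling" commit at the top of the list: the indexing it settles on is visible in neural_getData() in the diff below, where entry (i, j) of a layer's synapse matrix lives at i + (layer_size * j) in a flat array. A toy check of that arithmetic, with made-up sizes (nothing here is taken from the diff):

#include <stdio.h>

/* Illustration only: entry (i, j) of a layer_size x layer_size_next
 * matrix, stored the way neural_getData() stores it. */
int
main(void) {
	size_t layer_size = 4, layer_size_next = 3;
	float m[4 * 3];

	for (size_t j = 0; j < layer_size_next; j++)
		for (size_t i = 0; i < layer_size; i++)
			m[i + (layer_size * j)] = (float)(10 * i + j);

	printf("%.0f\n", m[2 + (layer_size * 1)]); /* entry (2,1) prints 21 */
	return 0;
}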
18 changed files with 667 additions and 115 deletions

3
.gitignore vendored

@@ -1,5 +1,6 @@
 build/
-*\.session
+training_data/
+vim.sessions/
 *\.tar*
 *\.gpg

CMakeLists.txt

@@ -1,5 +1,5 @@
 # CMake entry point
-cmake_minimum_required (VERSION 3.27.1)
+cmake_minimum_required(VERSION 3.31.0)
 project(CX C)
 cmake_policy(SET CMP0072 NEW)
@@ -17,9 +17,10 @@ set(ALL_LIBS
 	glfw
 	GLEW
 	m
+	pthread
 )
-set(CMAKE_C_FLAGS "-O0 -ggdb -Wall")
+set(CMAKE_C_FLAGS "-O0 -ggdb -Wall -std=gnu99 -Wpedantic")
 add_definitions(
 	-DTW_STATIC
@@ -33,6 +34,7 @@ add_executable(
 	cx
 	src/main.c
 	src/cx.c
+	src/cx_thread.c
 	src/tensor.c
 	src/model.c
 	src/shader.c

19
doc/issues.md Normal file

@@ -0,0 +1,19 @@
+# Issues
+
+## Error handling
+Some errors are being handled, some aren't, some are handled
+partially, and some errors (and/or their handling) might break the
+program before a proper return. Some return values of library
+functions are ignored altogether.
+
+## Context handling
+Context handling in its current form relies on all
+context types having a free() function stored
+at a specific place in the data structure.
+This will most likely result in a segfault any time
+a new structure is used that is not properly
+aligned.
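
The "Context handling" note above is exactly the hazard cx_thread_free() runs into in src/cx_thread.c below: it casts an arbitrary ctx to CX_GL_CTX and calls the free() pointer through it, which is only safe while every context type keeps that pointer as its first member. One conventional fix, sketched here as a suggestion rather than anything present in this diff (CX_CTX_Base is a hypothetical name), is a shared base struct:

/* Hypothetical shared header: C guarantees a pointer to a struct may
 * be converted to a pointer to its first member, so the cast below is
 * well-defined for any context that embeds the base first. */
typedef struct _cx_ctx_base {
	void (*free)(void *self);
} CX_CTX_Base;

typedef struct _cx_gl_ctx {
	CX_CTX_Base base;	/* must stay the first member */
	/* ... GL-specific fields ... */
} CX_GL_CTX;

static void
cx_ctx_free(void *ctx) {
	if (ctx)
		((CX_CTX_Base *)ctx)->free(ctx);
}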

include/cx.h

@@ -1,6 +1,8 @@
 #ifndef CX_H
 #define CX_H
+
+#define __STDC_WANT_IEC_60559_BFP_EXT__
 // Include standard headers
 #include <stdio.h>
 #include <stdlib.h>
@@ -8,6 +10,10 @@
 #include <math.h>
 #include <time.h>
 #include <unistd.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <inttypes.h>
+#include <string.h>
 // Include GLEW
 #include <GL/glew.h>
@@ -17,19 +23,55 @@
 #include <GLFW/glfw3.h>
 // Include project headers
+#include <cx_thread.h>
 #include <tensor.h>
 #include <model.h>
 #include <tensor.h>
-#include <shader.h>
 #include <neural.h>
+#include <shader.h>
+
+// Declare common data structures.
+typedef struct _cx_gl_ctx {
+	void (*free)(void *self);
+	uint8_t master_lock;
+	uint8_t *worker_locks;
+	CX_ThreadGroup **workers;
+	GLFWwindow *window;
+	ModelRegistry *mr;
+	GLuint *VertexArrayIDs;
+	size_t VertexArray_count;
+	size_t VertexArray_size;
+	GLuint *programIDs;
+	size_t ProgramID_count;
+	size_t ProgramID_size;
+} CX_GL_CTX;
+
+typedef struct _cx_nn_ctx {
+	void (*free)(void *self);
+	uint8_t master_lock;
+	uint8_t *worker_locks;
+	CX_ThreadGroup **workers;
+	Neural_Network *nn;
+	float *input_buffer;
+	float *output_buffer;
+} CX_NN_CTX;
+
+typedef struct _cx_ctx {
+	CX_ThreadGroup **threads;
+	CX_GL_CTX *gl_ctx;
+	CX_NN_CTX *nn_ctx;
+} CX_Context;
 
 // Declare functions
-int cx_glinit(GLFWwindow **);
+CX_Context *cx_context_new(void);
+int cx_glinit(CX_GL_CTX **);
 int cx_nninit(Neural_Network **);
-int cx_run(GLFWwindow *, Neural_Network *);
+int cx_init(CX_Context **);
+int cx_run(CX_Context *);
 
 #endif

20
include/cx_thread.h Normal file

@@ -0,0 +1,20 @@
+#ifndef CX_THREAD_H
+#define CX_THREAD_H
+
+typedef struct _cx_thrd {
+	pthread_t thread;
+	void *ctx; // Arbitrary thread context
+} CX_Thread;
+
+typedef struct _cx_thrgr {
+	CX_Thread *group_manager;
+	CX_Thread **workers;
+	size_t worker_count;
+	size_t worker_size;
+} CX_ThreadGroup;
+
+CX_ThreadGroup *cx_threadGroup_new(void *(*)(void *), void *);
+void cx_threadGroup_free(CX_ThreadGroup *);
+
+#endif
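
For orientation, a minimal usage sketch consistent with how src/cx.c drives this API below: cx_thread_new() passes the CX_Thread itself to pthread_create(), so the entry function reaches its context through self->ctx. The names work() and run_group() are illustrative, not part of the diff.

#include <cx.h>

/* Sketch only: run one group manager over a caller-owned context,
 * then join it, mirroring the pattern in cx_run(). */
static void *
work(void *self) {
	CX_Thread *self_t = self;
	/* ... operate on self_t->ctx ... */
	return NULL;
}

static int
run_group(void *ctx) {
	CX_ThreadGroup *tg = cx_threadGroup_new(&work, ctx);
	if (!tg)
		return -1;
	pthread_join(tg->group_manager->thread, NULL);
	cx_threadGroup_free(tg);
	return 0;
}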

include/model.h

@@ -22,7 +22,7 @@ int modelRegistry_register(ModelRegistry *, Model *);
 void modelRegistry_free(ModelRegistry *);
 GLfloat * model_applyTransformations(Model *);
 void model_colorFromPosition(Model *);
-void model_colorXYZ(Model *, int R, int G, int B);
+void model_colorXYZ(Model *, float R, float G, float B);
 void model_colorRed(Model *);
 void model_colorGreen(Model *);
 void model_colorBlue(Model *);

include/neural.h

@@ -18,10 +18,22 @@ typedef struct _neural_network {
 	ssize_t layer_count;
 } Neural_Network;
 
+typedef struct _neural_data {
+	float *neural_vector;
+	size_t vect_len;
+	float *synapse_matrix;
+	size_t mat_len;
+} Neural_Data;
+
 Neural_Network *neural_new(size_t, size_t, size_t);
+void neural_free(Neural_Network *);
+void neural_populate_sequential(Neural_Network *);
 void neural_randomize(Neural_Network *);
+float *neural_loadData(Neural_Network *, const char *);
 float *neural_process(Neural_Network *, float *);
-int neural_getMesh(ModelRegistry *, Neural_Network *);
+Neural_Data *neural_getData(Neural_Network *, size_t);
+int neural_getMesh(Neural_Network *, ModelRegistry *);
+char *neural_getXML(Neural_Network *);
 
 #endif

include/tensor.h

@@ -7,7 +7,7 @@ typedef struct _tensor {
 	size_t width;
 } Tensor;
 
-Tensor *tensor_new(size_t, size_t);
+Tensor *tensor_new(size_t, size_t, int);
 Tensor *tensor_fromVertexBuffer(float *, size_t);


251
src/cx.c

@@ -40,13 +40,13 @@ cx_glrender(GLFWwindow *window, GLuint programID,
 	glGenBuffers(1, &colorbuffer);
 
 	for (int i = 0; i < mr->model_count; i++) {
 		// Allocate the render buffer
 		// GL uses this to feed the GPU
 		render_buffer = model_applyTransformations(mr->models[i]);
 		cx_glBindBuffer(render_buffer, vertexbuffer, 0, 4,
 			mr->models[i]->bufsize*4*sizeof(GLfloat));
 		cx_glBindBuffer(mr->models[i]->colors, colorbuffer, 2, 3,
 			mr->models[i]->bufsize*3*sizeof(GLfloat));
@@ -80,12 +80,58 @@ cx_loadShaders(GLuint *VertexArrayID, GLuint *programID) {
 	return 0;
 }
 
+void
+gl_ctx_free(void *self) {
+	CX_GL_CTX *gl_ctx;
+
+	gl_ctx = self;
+	if (gl_ctx) {
+		free(gl_ctx->VertexArrayIDs);
+		free(gl_ctx->programIDs);
+		modelRegistry_free(gl_ctx->mr);
+	}
+	free(gl_ctx);
+}
+
+void
+nn_ctx_free(void *self) {
+	CX_NN_CTX *nn_ctx;
+
+	nn_ctx = self;
+	if (nn_ctx) {
+		free(nn_ctx->input_buffer);
+		free(nn_ctx->output_buffer);
+		neural_free(nn_ctx->nn);
+	}
+	free(nn_ctx);
+}
+
 int
-cx_glinit(GLFWwindow **window) {
+cx_glinit(CX_GL_CTX **gl_ctx) {
+	// Initialize OpenGL context
+	(*gl_ctx)->VertexArrayIDs = calloc(1, sizeof(GLuint));
+	if (!(*gl_ctx)->VertexArrayIDs) {
+		goto err;
+	}
+	(*gl_ctx)->VertexArray_count = 0;
+	(*gl_ctx)->VertexArray_size = 1;
+	(*gl_ctx)->programIDs = calloc(1, sizeof(GLuint));
+	if (!(*gl_ctx)->programIDs) {
+		goto err;
+	}
+	(*gl_ctx)->ProgramID_count = 0;
+	(*gl_ctx)->ProgramID_size = 1;
+	(*gl_ctx)->free = &gl_ctx_free;
+
 	// Initialise GLFW
+	printf("Initializing OpenGL.\n");
 	if(!glfwInit()) {
 		fprintf(stderr, "Failed to initialize GLFW\n");
-		return -1;
+		goto err;
 	}
 
 	glfwWindowHint(GLFW_SAMPLES, 4);
@@ -97,81 +143,39 @@ cx_glinit(GLFWwindow **window) {
 	glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
 
 	// Open a window and create its OpenGL context
-	*window = glfwCreateWindow(1280, 720, "CONTROL-X", NULL, NULL);
-	if (*window == NULL) {
+	(*gl_ctx)->window = glfwCreateWindow(1280, 720, "C-X", NULL, NULL);
+	if ((*gl_ctx)->window == NULL) {
 		fprintf(stderr, "Failed to open GLFW window.\n");
 		glfwTerminate();
-		return -1;
+		goto err;
 	}
+	printf("Window created.\n");
-	glfwMakeContextCurrent(*window);
+	glfwMakeContextCurrent((*gl_ctx)->window);
 
 	// Initialize GLEW
 	if (glewInit() != GLEW_OK) {
 		fprintf(stderr, "Failed to initialize GLEW\n");
 		glfwTerminate();
-		return -1;
+		goto err;
 	}
 
 	// Ensure we can capture the escape key being pressed below
-	glfwSetInputMode(*window, GLFW_STICKY_KEYS, GL_TRUE);
+	glfwSetInputMode((*gl_ctx)->window, GLFW_STICKY_KEYS, GL_TRUE);
 
 	// Dark grey background
 	glClearColor(0.15f, 0.15f, 0.15f, 0.0f);
 
 	return 0;
+
+err:
+	return -1;
 }
 
-static int
-cx_nnrun(Neural_Network *nn) {
-	// Establish a neural interface.
-	float *input_buffer = malloc(64*sizeof(float));
-	float *output_buffer;
-
-	output_buffer = neural_process(nn, input_buffer);
-	return 0;
-}
-
-int
-cx_run(GLFWwindow *window, Neural_Network *nn) {
-	ModelRegistry *mr;
-	GLuint VertexArrayID;
-	GLuint programID;
-
-	if (cx_loadShaders(&VertexArrayID, &programID)) {
-		return -1;
-	}
-
-	// Establish a model registry
-	mr = modelRegistry_new();
-
-	// Fill the model registry with mesh models
-	neural_getMesh(mr, nn);
-
-	// Remainder from cursor experiments, might be useful later
-	double xpos, ypos;
-	glfwGetCursorPos(window, &xpos, &ypos);
-
-	int t = 0;
-	do {
-		cx_glrender(window, programID, mr);
-		t++;
-		usleep(1000000/60);
-
-		// Check if the ESC key was pressed or the window was closed
-	} while(glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS
-		&& !glfwWindowShouldClose(window));
-
-	// Close OpenGL window and terminate GLFW
-	glfwTerminate();
-
-	modelRegistry_free(mr);
-	return 0;
-}
 int
 cx_nninit(Neural_Network **nn) {
 	// Allocate a Neural Network
+	printf("Initializing a neural network.\n");
 	*nn = neural_new(64, 4, 8);
 	if(!*nn) {
 		fprintf(stderr, "Failed to initialize Neural Network.\n");
@@ -179,9 +183,146 @@ cx_nninit(Neural_Network **nn) {
 	}
 
 	// Populate the neural network with sensible values.
-	neural_randomize(*nn);
+	neural_populate_sequential(*nn);
 
 	return 0;
 }
+
+int
+cx_init(CX_Context **cx_ctx) {
+	CX_GL_CTX *gl_ctx;
+	CX_NN_CTX *nn_ctx;
+
+	printf("Initializing CX.\n");
+	nn_ctx = calloc(1, sizeof(CX_NN_CTX));
+	nn_ctx->free = &nn_ctx_free;
+	*cx_ctx = calloc(1, sizeof(CX_Context));
+	gl_ctx = calloc(1, sizeof(CX_GL_CTX));
+	(*cx_ctx)->gl_ctx = gl_ctx;
+	(*cx_ctx)->nn_ctx = nn_ctx;
+	(*cx_ctx)->threads = calloc(1, sizeof(CX_ThreadGroup *));
+	if (!(*cx_ctx)->threads) {
+		goto err;
+	}
+	return 0;
+
+err:
+	if ((*cx_ctx)->gl_ctx) {
+		free((*cx_ctx)->gl_ctx->VertexArrayIDs);
+		free((*cx_ctx)->gl_ctx->programIDs);
+		free((*cx_ctx)->threads);
+	}
+	free(*cx_ctx);
+	return -1;
+}
+
+static int
+cx_glrun(CX_GL_CTX *ctx) {
+	// Remainder from cursor experiments, might be useful later
+	double xpos, ypos;
+	glfwGetCursorPos(ctx->window, &xpos, &ypos);
+
+	do {
+		// Skip render step if context is locked.
+		if (!ctx->master_lock) {
+			cx_glrender(ctx->window, ctx->programIDs[0], ctx->mr);
+		}
+		usleep(1000000/60);
+
+		// Check if the ESC key was pressed or the window was closed
+	} while(glfwGetKey(ctx->window, GLFW_KEY_ESCAPE) != GLFW_PRESS
+		&& !glfwWindowShouldClose(ctx->window));
+
+	// Close OpenGL window and terminate GLFW
+	glfwTerminate();
+	return 0;
+}
+
+static int
+cx_nnrun(CX_Thread *self) {
+	// Establish a neural interface.
+	float *output_buffer;
+	CX_NN_CTX *ctx = self->ctx;
+
+	output_buffer = neural_process(ctx->nn, ctx->input_buffer);
+	ctx->output_buffer = output_buffer;
+	return 0;
+}
+
+static void *
+cx_glthread(void *self) {
+	CX_Thread *self_t = self;
+	CX_GL_CTX *gl_ctx = self_t->ctx;
+
+	cx_glinit(&gl_ctx);
+	if (cx_loadShaders(gl_ctx->VertexArrayIDs, gl_ctx->programIDs)) {
+		return NULL;
+	}
+	cx_glrun(gl_ctx);
+	return NULL;
+}
+
+static void *
+cx_nnthread(void *self) {
+	CX_Thread *self_t = self;
+	CX_NN_CTX *nn_ctx = self_t->ctx;
+	float *input, *output;
+	char *export;
+
+	cx_nninit(&nn_ctx->nn);
+	input = neural_loadData(nn_ctx->nn, "../training_data/0");
+	output = neural_process(nn_ctx->nn, input);
+	export = neural_getXML(nn_ctx->nn);
+	return export;
+}
+
+int
+cx_run(CX_Context *ctx) {
+	CX_ThreadGroup *tg[2];
+	void *neural_xml;
+
+	// Establish a model registry
+	ctx->gl_ctx->mr = modelRegistry_new();
+	ctx->gl_ctx->master_lock = 1;
+
+	tg[0] = cx_threadGroup_new(&cx_glthread, ctx->gl_ctx);
+	tg[1] = cx_threadGroup_new(&cx_nnthread, ctx->nn_ctx);
+
+	pthread_join(tg[1]->group_manager->thread, &neural_xml);
+	printf("%s\n", neural_xml);
+	ctx->gl_ctx->master_lock = 0;
+
+	neural_getMesh(ctx->nn_ctx->nn, ctx->gl_ctx->mr);
+	pthread_join(tg[0]->group_manager->thread, NULL);
+
+	cx_threadGroup_free(tg[0]);
+	cx_threadGroup_free(tg[1]);
+	free(ctx->threads);
+	free(ctx);
+	free(neural_xml);
+	return 0;
+}

64
src/cx_thread.c Normal file

@@ -0,0 +1,64 @@
+#include <cx.h>
+
+CX_Thread *
+cx_thread_new(void *(*target)(void *),
+	void *ctx) {
+	CX_Thread *self;
+	int err;
+
+	self = malloc(sizeof(CX_Thread));
+	if (!self) {
+		goto err;
+	}
+	self->ctx = ctx;
+	err = pthread_create(&self->thread, NULL, target, self);
+	if (err) {
+		goto err;
+	}
+	return self;
+
+err:
+	free(self);
+	return NULL;
+}
+
+void
+cx_thread_free(CX_Thread *self) {
+	if (self) {
+		/* TODO */
+		/* This is naive in its current form and will shatter
+		 * sooner or later.
+		 * Fix the context structures so that this call
+		 * is guaranteed not to touch invalid memory.
+		 */
+		((CX_GL_CTX *)self->ctx)->free(self->ctx);
+	}
+	free(self);
+}
+
+CX_ThreadGroup *
+cx_threadGroup_new(void *(*target)(void *),
+	void *ctx) {
+	CX_ThreadGroup *self;
+
+	self = malloc(sizeof(CX_ThreadGroup));
+	self->workers = malloc(8 * sizeof(CX_Thread *));
+	self->worker_count = 0;
+	self->worker_size = 8;
+	self->group_manager = cx_thread_new(target, ctx);
+
+	return self;
+}
+
+void
+cx_threadGroup_free(CX_ThreadGroup *self) {
+	if (self) {
+		cx_thread_free(self->group_manager);
+		free(self->workers);
+	}
+	free(self);
+}

src/main.c

@@ -1,26 +1,19 @@
-// Include standard headers
-#include <stdio.h>
-#include <stdlib.h>
-
-// Include project headers
 #include <cx.h>
 
 int
 main(void) {
-	GLFWwindow *window;
-	Neural_Network *nn;
+	// CX context (Window, neural network, threads.)
+	CX_Context *cx_ctx;
 	int retval;
 
-	if (cx_glinit(&window)) {
+	if (cx_init(&cx_ctx)) {
 		return -1;
 	}
 
-	if (cx_nninit(&nn)) {
-		return -1;
-	}
-
-	retval = cx_run(window, nn);
+	// Do magic
+	retval = cx_run(cx_ctx);
 
+	// Complain about failure
 	return retval;
 }

src/model.c

@@ -92,6 +92,7 @@ model_applyTransformations(Model *self) {
 	if (!self->transformation_count) {
 		retval = malloc(self->bufsize * 4 * sizeof(GLfloat));
 		memcpy(retval, self->object, self->bufsize * 4 * sizeof(GLfloat));
+		tensor_free(temp_buffer[1]);
 		return retval;
 	}
@@ -109,6 +110,7 @@ model_applyTransformations(Model *self) {
 				->data[j*temp_buffer[(i+1)%2]->width+k];
 			}
 		}
+		tensor_free(temp_buffer[(i+1)%2]);
 	}
 	return retval;
 }
@@ -127,7 +129,7 @@ model_colorFromPosition(Model *self) {
 	}
 }
 
-void model_colorXYZ(Model *self, int R, int G, int B) {
+void model_colorXYZ(Model *self, float R, float G, float B) {
 	for (int i = 0; i < self->bufsize; i++) {
 		for (int j = 0; j < 4; j++) {
 			switch(j) {

src/neural.c

@@ -17,7 +17,12 @@ nl_new(size_t layer_size, size_t layer_size_next) {
 
 static void
 nl_free(Neural_Layer *self) {
+	if (self) {
+		for (int i = 0; i < self->layer_size; i++) {
+			free(self->neurons[i].synapses);
+		}
 		free(self->neurons);
+	}
 	free(self);
 }
@@ -28,6 +33,7 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {
 		// Failed to allocate.
 		return NULL;
 	}
+
 	// The difference between layer sizes, hidden layers step between the two
 	// sizes in linear fashion.
 	ssize_t layer_diff;
@@ -38,8 +44,8 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {
 
 	// Calculate sizes of individual layers and allocate them.
 	for (int i = 0; i < layer_count; i++) {
-		self->layers[i] = nl_new(input_size
-			+ (layer_diff * i / ((ssize_t)layer_count-1)),
+		self->layers[i] = nl_new(input_size + (layer_diff * i
+			/ ((ssize_t)layer_count-1)),
 			i < (layer_count-1) ?
 				(input_size + (layer_diff * (i+1)
@@ -50,24 +56,57 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {
 	return self;
 }
 
+void
+neural_free(Neural_Network *self) {
+	if (self) {
+		for (int i = 0; i < self->layer_count; i++) {
+			nl_free(self->layers[i]);
+		}
+		free(self->layers);
+	}
+	free(self);
+}
+
+void
+neural_populate_sequential(Neural_Network *self) {
+	Neural_Layer *nl;
+
+	for (int i = 0; i < self->layer_count; i++) {
+		nl = self->layers[i];
+		int populator = 0;
+		for (int j = 0; j < nl->layer_size; j++) {
+			for (int k = 0; k < nl->layer_size_next; k++) {
+				nl->neurons[j].synapses[k] = (float)populator;
+				populator++;
+			}
+		}
+	}
+}
 void
 neural_randomize(Neural_Network *self) {
 	FILE *f;
 	Neural_Layer *nl;
+	uint64_t *rand_vals;
 
 	f = fopen("/dev/urandom", "r");
 
 	for (int i = 0; i < self->layer_count; i++) {
 		nl = self->layers[i];
 		for (int j = 0; j < nl->layer_size; j++) {
-			fread(nl->neurons[j].synapses, sizeof(float), nl->layer_size_next, f);
+			rand_vals = malloc(nl->layer_size_next * sizeof(uint64_t));
+			fread(rand_vals, sizeof(uint64_t),
+				nl->layer_size_next, f);
+			for (int k = 0; k < nl->layer_size_next; k++) {
+				nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX / nl->layer_size;
+			}
+			free(rand_vals);
 		}
 	}
 }
 
 float *
 neural_loadData(Neural_Network *self, const char *filename) {
-	Neural_Layer *nl;
 	FILE *f;
 	char *file_data;
 	float *retval;
@@ -80,8 +119,6 @@ neural_loadData(Neural_Network *self, const char *filename) {
 
 	f = fopen(filename, "r");
 
-	nl = self->layers[0];
-
 	fread(file_data, sizeof(char), 9*8, f); // 9*8 - 8*8 value matrix + newlines
 	for (int i = 0; i < 8*8; i++) {
 		if (file_data[read_cursor] == '\n') {
@@ -99,6 +136,7 @@ neural_loadData(Neural_Network *self, const char *filename) {
 			return NULL;
 			break;
 		}
+		read_cursor++;
 	}
 	return retval;
 }
@@ -112,10 +150,10 @@ neural_process(Neural_Network *self, float *input) {
 	for (int i = 0; i < self->layers[0]->layer_size; i++) {
 		nl->neurons[i].value = input[i];
 	}
-	neural_vector = tensor_new(1, nl->layer_size);
 
 	for (int i = 0; i < self->layer_count; i++) {
+		neural_vector = tensor_new(nl->layer_size, 1, 0);
 		nl = self->layers[i];
-		synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size);
+		synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size, 0);
 		for (int j = 0; j < nl->layer_size; j++) {
 			neural_vector->data[j] = nl->neurons[j].value;
 			for (int k = 0; k < nl->layer_size_next; k++) {
@@ -124,9 +162,16 @@ neural_process(Neural_Network *self, float *input) {
 		}
 
 		temp_buffer = tensor_multip(synapse_matrix, neural_vector);
+		neural_vector = temp_buffer;
+
+		if (nl->layer_size_next) {
+			Neural_Layer *nl_next = self->layers[i+1];
+			for (int j = 0; j < nl_next->layer_size; j++) {
+				nl_next->neurons[j].value = neural_vector->data[j];
+			}
+		}
+
 		tensor_free(neural_vector);
 		tensor_free(synapse_matrix);
-		neural_vector = temp_buffer;
 	}
 
 	retval = malloc(nl->layer_size * sizeof(float));
@@ -137,21 +182,99 @@ neural_process(Neural_Network *self, float *input) {
 	return retval;
 }
-int
-neural_train(Neural_Network *self,
-		const char *testdata,
-		const float *testresult) {
-	// Insert algorithm you lazy fuck.
-	return 0;
-}
+static void *
+neural_backpropagation(Neural_Network *self, int neuron, int layer, float ratio) {
+	Neural_Layer *nl;
+	Neural_Data *nd;
+	float *ratios;
+	int *neurons;
+	float *synapses;
+
+	for (int i = layer-1; i >= 0; i--) {
+		nl = self->layers[i];
+		for (int j = 0; j < nl->layer_size; j++) {
+			synapses = nl->neurons[j].synapses;
+			for (int k = 0; k < nl->layer_size_next; i++) {
+				synapses[k] = 0;
+			}
+		}
+	}
+	return NULL;
+}
 
 int
-neural_getMesh(ModelRegistry *mr, Neural_Network *nn) {
+neural_train(Neural_Network *self,
+	const char *input_path,
+	const float *expected_result) {
+	Neural_Data *result_data; // What the neural network computed
+	float backprop_ratio;
+
+	for (int i = self->layer_count-1; i >= 0; i--) {
+		Neural_Layer *nl = self->layers[i];
+		result_data = neural_getData(self, i);
+		for (int j = nl->layer_size-1; j >= 0; j--) {
+			backprop_ratio = nl->neurons[i].value / expected_result[i];
+			neural_backpropagation(self, j, i, backprop_ratio);
+		}
+	}
+	return 0;
+}
+
+Neural_Data *
+neural_data_new(int layer_size, int layer_size_next) {
+	Neural_Data *self;
+
+	self = calloc(1, sizeof(Neural_Data));
+	self->neural_vector = malloc(layer_size * sizeof(float));
+	self->vect_len = layer_size;
+	if (layer_size_next) {
+		self->synapse_matrix = malloc(layer_size * layer_size_next
+			* sizeof(float));
+		self->mat_len = layer_size_next;
+	}
+	else {
+		self->synapse_matrix = NULL;
+		self->mat_len = 0;
+	}
+	return self;
+}
+
+Neural_Data *
+neural_getData(Neural_Network *self, size_t layer) {
+	Neural_Layer *nl;
+	Neural_Data *retval;
+
+	nl = self->layers[layer];
+	retval = neural_data_new(nl->layer_size, nl->layer_size_next);
+
+	if (retval->mat_len) {
+		for (int i = 0; i < nl->layer_size; i++) {
+			for (int j = 0; j < nl->layer_size_next; j++) {
+				retval->synapse_matrix[i+(nl->layer_size*j)] = nl->neurons[i].synapses[j];
+			}
+		}
+	}
+	for (int i = 0; i < nl->layer_size; i++) {
+		retval->neural_vector[i] = nl->neurons[i].value;
+	}
+	return retval;
+}
+
+int
+neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
 	Model *model;
 
 	for (int j = 0; j < nn->layer_count; j++) {
 		Neural_Layer *nl = nn->layers[j];
 		for (int i = 0; i < nl->layer_size; i++) {
-			unsigned int brightness;
+			float brightness;
 			for (int k = 0; k < nl->layer_size_next; k++) {
 				model = model_line((-.90)
 					+ ((GLfloat)2 * i * .90/(nl->layer_size-1)),
@@ -165,22 +288,27 @@ neural_getMesh(ModelRegistry *mr, Neural_Network *nn) {
 					.001 // girth
 				);
 
-				brightness = nl->neurons[i].synapses[k] <= 1.0 ? nl->neurons[i].synapses[k] : 255;
+				brightness = nl->neurons[i].synapses[k];
+				if (brightness) {
 					model_colorXYZ(model, brightness, 0, 0);
+				}
 				modelRegistry_register(mr, model);
 			}
 			model = model_circle(0, (GLfloat)1/64);
-			brightness = nl->neurons[i].value <= 1.0 ? nl->neurons[i].value : 255;
+			brightness = nl->neurons[i].value <= 1.0 ?
+				nl->neurons[i].value : 1.0;
 			model_colorXYZ(model, 0, brightness, 0);
-			Tensor *translation_matrix = tensor_new(4, 4);
-			Tensor *aspectRatio_matrix = tensor_new(4, 4);
+			Tensor *translation_matrix = tensor_new(4, 4, 1);
+			Tensor *aspectRatio_matrix = tensor_new(4, 4, 1);
 			aspectRatio_matrix->data[0] = (GLfloat)9/16;
-			translation_matrix->data[3] = (((GLfloat)-1*16/9)*.90)
-				+ ((GLfloat)1/(nl->layer_size-1)*2 * i * (((GLfloat)16/9))*.90);
-			translation_matrix->data[7] = .90 - ((GLfloat)1/(nn->layer_count)*2 * j *.90);
+			translation_matrix->data[3] = (((GLfloat)-1*16/9)*.90)
+				+ ((GLfloat)1/(nl->layer_size-1)
+				* 2 * i * (((GLfloat)16/9))*.90);
+			translation_matrix->data[7] = .90 - ((GLfloat)1/(nn->layer_count)
+				* 2 * j *.90);
 
 			model->transformations[0] = translation_matrix;
 			model->transformations[1] = aspectRatio_matrix;
@@ -194,3 +322,122 @@ neural_getMesh(ModelRegistry *mr, Neural_Network *nn) {
 	return 0;
 }
+
+static char*
+indented_line(char *str, const char *line, int *indent) {
+	for (int m = 0; m < *indent; m++)
+		str = strcat(str, " ");
+	str = strcat(str, line);
+	return str;
+}
+
+static char*
+indented_tag(char *str, const char *tag, int *indent) {
+	if (tag[1] == '/') {
+		*indent -= 4;
+	}
+	indented_line(str, tag, indent);
+	if (tag[1] != '/') {
+		*indent += 4;
+	}
+	return str;
+}
+
+// TODO
+/* This XML implementation has potential bugs and has not
+ * been checked very thoroughly, fix, please.
+ */
+char *
+neural_getXML(Neural_Network *nn) {
+	char *retval;
+	const char *to_write;
+	int volume = 0;
+	int indent = 0;
+
+	retval = malloc(0xff * sizeof(char));
+	to_write = "<?xml version=\"1.0\"?>\n\n";
+	retval = strcpy(retval, to_write);
+	to_write = "<Network>\n";
+	retval = indented_tag(retval, to_write, &indent);
+
+	for (int i = 0; i < nn->layer_count; i++) {
+		Neural_Layer *nl;
+		Neural_Data *nd;
+		char *line_prep;
+
+		nl = nn->layers[i];
+		nd = neural_getData(nn, i);
+		retval = realloc(retval, strlen(retval)
+			+ (nl->layer_size * 32 * nl->layer_size_next) // Matrix
+			+ (nl->layer_size * 32) // Vector
+			+ 0x3ff * nl->layer_size // Expected tag garbage.
+			+ indent); // Space waster
+		to_write = "<Layer>\n";
+		retval = indented_tag(retval, to_write, &indent);
+		to_write = "<Synapse_Matrix>\n";
+		retval = indented_tag(retval, to_write, &indent);
+		for (int j = 0; j < nd->mat_len; j++) {
+			char number_buffer[32];
+
+			line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
+				+ (nl->layer_size * 32));
+			*line_prep = '\0';
+			line_prep = strcat(line_prep, "[ ");
+			for (int k = 0; k < nd->vect_len; k++) {
+				strfromf(number_buffer, 32, "%.4f ", nd->synapse_matrix[k+(j*nd->vect_len)]);
+				line_prep = strcat(line_prep, number_buffer);
+				if (k < nd->vect_len - 1) {
+					line_prep = strcat(line_prep, ", ");
+				}
+			}
+			line_prep = strcat(line_prep, " ]\n");
+			retval = indented_line(retval, line_prep, &indent);
+			free(line_prep);
+		}
+		to_write = "</Synapse_Matrix>\n";
+		retval = indented_tag(retval, to_write, &indent);
+		to_write = "<Neural_Vector>\n";
+		retval = indented_tag(retval, to_write, &indent);
+
+		char number_buffer[32];
+
+		line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
+			+ (nl->layer_size * 32));
+		*line_prep = '\0';
+		line_prep = strcat(line_prep, "[ ");
+		for (int k = 0; k < nd->vect_len; k++) {
+			strfromf(number_buffer, 32, "%.4f", nd->neural_vector[k]);
+			line_prep = strcat(line_prep, number_buffer);
+			if (k < nd->vect_len - 1) {
+				line_prep = strcat(line_prep, ", ");
+			}
+		}
+		line_prep = strcat(line_prep, " ]\n");
+		retval = indented_line(retval, line_prep, &indent);
+		free(line_prep);
+		to_write = "</Neural_Vector>\n";
+		retval = indented_tag(retval, to_write, &indent);
+		to_write = "</Layer>\n";
+		retval = indented_tag(retval, to_write, &indent);
+	}
+	to_write = "</Network>\n";
+	retval = indented_tag(retval, to_write, &indent);
+	return retval;
+}
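
For orientation, the document neural_getXML() assembles has roughly this shape: indentation comes from the 4-space indent helpers, one bracketed row per synapse-matrix row, then the layer's value vector. The numbers below are illustrative, not real output:

<?xml version="1.0"?>

<Network>
    <Layer>
        <Synapse_Matrix>
            [ 0.0000 , 1.0000 , 2.0000  ]
            [ 3.0000 , 4.0000 , 5.0000  ]
        </Synapse_Matrix>
        <Neural_Vector>
            [ 0.0000, 1.0000, 2.0000 ]
        </Neural_Vector>
    </Layer>
    ...
</Network>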

src/tensor.c

@@ -1,7 +1,7 @@
 #include "cx.h"
 
 Tensor *
-tensor_new(size_t len, size_t width) {
+tensor_new(size_t len, size_t width, int is_identity) {
 	Tensor *mat;
 
 	mat = malloc(1 * sizeof(Tensor));
@@ -10,6 +10,10 @@ tensor_new(size_t len, size_t width) {
 	mat->len = len;
 	mat->width = width;
 
+	if (!is_identity) {
+		return mat;
+	}
+
 	for (int i = 0; i < len; i++) {
 		mat->data[i*width+(i % width)] = 1;
 	}
@@ -24,7 +28,7 @@ tensor_fromVertexBuffer(float *buffer, size_t bufsize) {
 
 	mat_width = bufsize;
 
-	mat = tensor_new(4, mat_width);
+	mat = tensor_new(4, mat_width, 0);
 
 	for (int i = 0; i < bufsize; i++) {
 		for (int j = 0; j < 4; j++) {
@@ -34,12 +38,17 @@ tensor_fromVertexBuffer(float *buffer, size_t bufsize) {
 	return mat;
 }
 
+Tensor *
+tensor_fromNeuralData(Neural_Data *nd) {
+	return NULL;
+}
+
 Tensor *
 tensor_multip(Tensor *mat2, Tensor *mat1) {
 	Tensor *result;
 	float dot_prod;
 
-	result = tensor_new(mat2->len, mat1->width);
+	result = tensor_new(mat2->len, mat1->width, 0);
 
 	for (int i = 0; i < mat1->width; i++) {
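
One note on the new is_identity flag: tensor_new(4, 4, 1) seeds ones down the diagonal, and since data is laid out row-major, data[3] and data[7] are the x and y translation slots that neural_getMesh() above writes into. A hypothetical helper (make_translation() is not in the diff) showing the same construction:

#include "cx.h"

/* Sketch: build the kind of 4x4 translation matrix the mesh code
 * sets up; assumes non-diagonal entries start at zero. */
Tensor *
make_translation(GLfloat x, GLfloat y) {
	Tensor *t = tensor_new(4, 4, 1); /* is_identity = 1: diagonal of 1s */
	t->data[3] = x; /* row 0, column 3: x translation */
	t->data[7] = y; /* row 1, column 3: y translation */
	return t;
}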
