Compare commits
No commits in common. "264fcb407ba1430a0eb112b13d708c6f5fda98a9" and "4b2db87c9e57cc5cfc4f1abf2c51b02ffd283cdc" have entirely different histories.
264fcb407b ... 4b2db87c9e
14 changed files with 155 additions and 528 deletions
@@ -1,5 +1,5 @@
# CMake entry point
cmake_minimum_required(VERSION 3.31.0)
cmake_minimum_required (VERSION 3.30.5)
project(CX C)
cmake_policy(SET CMP0072 NEW)

@@ -20,7 +20,7 @@ set(ALL_LIBS
    pthread
)

set(CMAKE_C_FLAGS "-O0 -ggdb -Wall -std=gnu99 -Wpedantic")
set(CMAKE_C_FLAGS "-O0 -ggdb -Wall")

add_definitions(
    -DTW_STATIC

@@ -34,7 +34,6 @@ add_executable(
    cx
    src/main.c
    src/cx.c
    src/cx_thread.c
    src/tensor.c
    src/model.c
    src/shader.c

@@ -1,19 +0,0 @@
# Issues

## Error handling

Some errors are handled, some are not, and some are handled only partially;
some errors (and/or their handling) may break the program before a proper
return. Some return values of library functions are ignored altogether.

## Context handling

Context handling in its current form relies on every context type having a
free() function stored at a specific place in its data structure.

This will most likely result in a segfault whenever a structure is used
whose layout does not match that expectation.

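A minimal sketch of the pattern that note describes, with hypothetical names (nothing below is taken from this repository): generic cleanup code assumes every context begins with a free callback and reaches it through a cast, so any context whose layout differs sends the call through garbage.

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical context type; the shared assumption is that the free
     * callback is the FIRST member of every context struct. */
    typedef struct {
        void (*free)(void *self);
        int state;                        /* stand-in for real members */
    } Example_Ctx;

    /* Generic cleanup in the style the note describes: reinterpret the
     * pointer as "something whose first member is the callback". */
    typedef struct { void (*free)(void *self); } Any_Ctx;

    static void ctx_release(void *ctx) {
        if (ctx)
            ((Any_Ctx *)ctx)->free(ctx);  /* undefined behaviour (usually a
                                             segfault) when the callback is
                                             not really the first member */
    }

    static void example_ctx_free(void *self) {
        printf("freeing context\n");
        free(self);
    }

    int main(void) {
        Example_Ctx *ctx = malloc(sizeof(Example_Ctx));
        if (!ctx)
            return 1;
        ctx->free = example_ctx_free;
        ctx->state = 0;
        ctx_release(ctx);                 /* fine: layout matches the assumption */
        return 0;
    }

The CX_GL_CTX and CX_NN_CTX structures in include/cx.h below follow this layout, while CX_Thread's ctx pointer may reference anything, which is where the hazard comes from.
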
include/cx.h (46 changed lines)
@@ -1,8 +1,6 @@
#ifndef CX_H
#define CX_H

#define __STDC_WANT_IEC_60559_BFP_EXT__

// Include standard headers
#include <stdio.h>
#include <stdlib.h>

@@ -12,8 +10,6 @@
#include <unistd.h>
#include <stdint.h>
#include <pthread.h>
#include <inttypes.h>
#include <string.h>

// Include GLEW
#include <GL/glew.h>

@@ -23,51 +19,43 @@
#include <GLFW/glfw3.h>

// Include project headers
#include <cx_thread.h>
#include <tensor.h>
#include <model.h>
#include <tensor.h>
#include <neural.h>
#include <shader.h>
#include <neural.h>

// Declare common data structures.

typedef struct _cx_gl_ctx {
    void (*free)(void *self);
    uint8_t master_lock;
    uint8_t *worker_locks;
    CX_ThreadGroup **workers;
typedef struct _cx_thrd {
    pthread_t thread;
    void *ctx; // Arbitrary thread context
} CX_Thread;

typedef struct _cx_thrgr {
    CX_Thread *group_manager;
    CX_Thread **workers;
    size_t worker_count;
    size_t worker_size;
} CX_ThreadGroup;

typedef struct _cx_ctx {
    GLFWwindow *window;
    ModelRegistry *mr;
    Neural_Network *nn;
    CX_ThreadGroup **threads;
    GLuint *VertexArrayIDs;
    size_t VertexArray_count;
    size_t VertexArray_size;
    GLuint *programIDs;
    size_t ProgramID_count;
    size_t ProgramID_size;
} CX_GL_CTX;

typedef struct _cx_nn_ctx {
    void (*free)(void *self);
    uint8_t master_lock;
    uint8_t *worker_locks;
    CX_ThreadGroup **workers;
    Neural_Network *nn;
    float *input_buffer;
    float *output_buffer;
} CX_NN_CTX;

typedef struct _cx_ctx {
    CX_ThreadGroup **threads;
    CX_GL_CTX *gl_ctx;
    CX_NN_CTX *nn_ctx;
} CX_Context;

// Declare functions

CX_Context *cx_context_new(void);

int cx_glinit(CX_GL_CTX **);
int cx_glinit(GLFWwindow **);
int cx_nninit(Neural_Network **);
int cx_init(CX_Context **);

@@ -1,20 +0,0 @@
#ifndef CX_THREAD_H
#define CX_THREAD_H

typedef struct _cx_thrd {
    pthread_t thread;
    void *ctx; // Arbitrary thread context
} CX_Thread;

typedef struct _cx_thrgr {
    CX_Thread *group_manager;
    CX_Thread **workers;
    size_t worker_count;
    size_t worker_size;
} CX_ThreadGroup;

CX_ThreadGroup *cx_threadGroup_new(void *(*)(void *), void *);
void cx_threadGroup_free(CX_ThreadGroup *);

#endif

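As a usage sketch of the API declared above (and implemented in the src/cx_thread.c removed later in this diff): the context handed to cx_threadGroup_new should start with a free callback, since cx_thread_free reaches it through a cast when the group is released. Everything named Example_* below is hypothetical.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    /* Assumes the CX_Thread / CX_ThreadGroup declarations shown above are in scope. */

    /* Hypothetical context: per the issue noted earlier in this diff, the free
     * callback sits first so cx_thread_free() can reach it through a cast. */
    typedef struct {
        void (*free)(void *self);
        int work_item;
    } Example_Ctx;

    static void example_ctx_free(void *self) { free(self); }

    /* Hypothetical worker; the exact argument depends on which cx_thread_new
     * variant is in use (see the two implementations elsewhere in this diff). */
    static void *example_worker(void *arg) {
        (void)arg;
        printf("worker started\n");
        return NULL;
    }

    int example_run(void) {
        Example_Ctx *ctx = calloc(1, sizeof(Example_Ctx));
        if (!ctx)
            return -1;
        ctx->free = example_ctx_free;

        CX_ThreadGroup *tg = cx_threadGroup_new(&example_worker, ctx);
        if (!tg)
            return -1;

        /* Same shutdown order cx_run() uses: join the manager thread, then free. */
        pthread_join(tg->group_manager->thread, NULL);
        cx_threadGroup_free(tg);   /* releases the group and, via the callback, the ctx */
        return 0;
    }
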
@@ -22,7 +22,7 @@ int modelRegistry_register(ModelRegistry *, Model *);
void modelRegistry_free(ModelRegistry *);
GLfloat * model_applyTransformations(Model *);
void model_colorFromPosition(Model *);
void model_colorXYZ(Model *, float R, float G, float B);
void model_colorXYZ(Model *, int R, int G, int B);
void model_colorRed(Model *);
void model_colorGreen(Model *);
void model_colorBlue(Model *);

@@ -26,13 +26,10 @@ typedef struct _neural_data {
} Neural_Data;

Neural_Network *neural_new(size_t, size_t, size_t);
void neural_free(Neural_Network *);
void neural_randomize(Neural_Network *);
float *neural_loadData(Neural_Network *, const char *);
float *neural_process(Neural_Network *, float *);
Neural_Data *neural_getData(Neural_Network *, size_t);
int neural_getMesh(Neural_Network *, ModelRegistry *);
char *neural_getXML(Neural_Network *);

#endif

@@ -7,7 +7,7 @@ typedef struct _tensor {
    size_t width;
} Tensor;

Tensor *tensor_new(size_t, size_t, int);
Tensor *tensor_new(size_t, size_t);

Tensor *tensor_fromVertexBuffer(float *, size_t);

src/cx.c (250 changed lines)
@@ -1,5 +1,41 @@
#include <cx.h>

static CX_Thread *
cx_thread_new(void *(*target)(void *),
              void *ctx) {
    CX_Thread *self;
    int err;

    self = malloc(sizeof(CX_Thread));
    if (!self) {
        goto err;
    }
    err = pthread_create(&self->thread, NULL, target, ctx);
    if (err) {
        goto err;
    }
    self->ctx = ctx;

err:
    free(self);
    return NULL;
}

static CX_ThreadGroup *
cx_threadGroup_new(void *(*target)(void *),
                   void *ctx) {
    CX_ThreadGroup *self;

    self = malloc(sizeof(CX_ThreadGroup));

    self->group_manager = cx_thread_new(target, ctx);
    self->workers = malloc(8 * sizeof(CX_Thread *));
    self->worker_count = 0;
    self->worker_size = 8;

    return self;
}

static void
cx_glBindBuffer(GLfloat *render_buffer, GLuint buffer_address,
                GLuint gl_index, GLint member_size, GLsizeiptr bufsize) {

@@ -80,58 +116,13 @@ cx_loadShaders(GLuint *VertexArrayID, GLuint *programID) {
    return 0;
}

void
gl_ctx_free(void *self) {
    CX_GL_CTX *gl_ctx;

    gl_ctx = self;

    if (gl_ctx) {
        free(gl_ctx->VertexArrayIDs);
        free(gl_ctx->programIDs);
        modelRegistry_free(gl_ctx->mr);
    }
    free(gl_ctx);
}

void
nn_ctx_free(void *self) {
    CX_NN_CTX *nn_ctx;

    nn_ctx = self;

    if (nn_ctx) {
        free(nn_ctx->input_buffer);
        free(nn_ctx->output_buffer);
        neural_free(nn_ctx->nn);
    }
    free(nn_ctx);
}

int
cx_glinit(CX_GL_CTX **gl_ctx) {
    // Initialize OpenGL context

    (*gl_ctx)->VertexArrayIDs = calloc(1, sizeof(GLuint));
    if (!(*gl_ctx)->VertexArrayIDs) {
        goto err;
    }
    (*gl_ctx)->VertexArray_count = 0;
    (*gl_ctx)->VertexArray_size = 1;
    (*gl_ctx)->programIDs = calloc(1, sizeof(GLuint));
    if (!(*gl_ctx)->programIDs) {
        goto err;
    }
    (*gl_ctx)->ProgramID_count = 0;
    (*gl_ctx)->ProgramID_size = 1;

    (*gl_ctx)->free = &gl_ctx_free;

cx_glinit(GLFWwindow **window) {
    // Initialise GLFW
    printf("Initializing OpenGL.\n");
    if(!glfwInit()) {
        fprintf(stderr, "Failed to initialize GLFW\n");
        goto err;
        return -1;
    }

    glfwWindowHint(GLFW_SAMPLES, 4);

@@ -143,33 +134,29 @@ cx_glinit(CX_GL_CTX **gl_ctx) {
    glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);

    // Open a window and create its OpenGL context
    (*gl_ctx)->window = glfwCreateWindow(1280, 720, "C-X", NULL, NULL);
    if ((*gl_ctx)->window == NULL) {
    *window = glfwCreateWindow(1280, 720, "C-X", NULL, NULL);
    if (*window == NULL) {
        fprintf(stderr, "Failed to open GLFW window.\n");
        glfwTerminate();
        goto err;
        return -1;
    }
    printf("Window created.\n");

    glfwMakeContextCurrent((*gl_ctx)->window);
    glfwMakeContextCurrent(*window);

    // Initialize GLEW
    if (glewInit() != GLEW_OK) {
        fprintf(stderr, "Failed to initialize GLEW\n");
        glfwTerminate();
        goto err;
        return -1;
    }

    // Ensure we can capture the escape key being pressed below
    glfwSetInputMode((*gl_ctx)->window, GLFW_STICKY_KEYS, GL_TRUE);
    glfwSetInputMode(*window, GLFW_STICKY_KEYS, GL_TRUE);

    // Dark grey background
    glClearColor(0.15f, 0.15f, 0.15f, 0.0f);

    return 0;

err:
    return -1;
}

int

@@ -188,138 +175,79 @@ cx_nninit(Neural_Network **nn) {
    return 0;
}

static void
master_thread(void *ctx) {
}

int
cx_init(CX_Context **cx_ctx) {
    CX_GL_CTX *gl_ctx;
    CX_NN_CTX *nn_ctx;

    printf("Initializing CX.\n");

    nn_ctx = calloc(1, sizeof(CX_NN_CTX));
    nn_ctx->free = &nn_ctx_free;
    printf("Initializing CX.");

    *cx_ctx = calloc(1, sizeof(CX_Context));
    gl_ctx = calloc(1, sizeof(CX_GL_CTX));

    (*cx_ctx)->gl_ctx = gl_ctx;
    (*cx_ctx)->nn_ctx = nn_ctx;

    (*cx_ctx)->threads = calloc(1, sizeof(CX_ThreadGroup *));
    if (!(*cx_ctx)->threads) {
        goto err;
    }

    return 0;

err:
    if ((*cx_ctx)->gl_ctx) {
        free((*cx_ctx)->gl_ctx->VertexArrayIDs);
        free((*cx_ctx)->gl_ctx->programIDs);
        free((*cx_ctx)->threads);
    }

    free(*cx_ctx);
    (*cx_ctx)->VertexArrayIDs = calloc(1, sizeof(GLuint));
    (*cx_ctx)->VertexArray_count = 0;
    (*cx_ctx)->VertexArray_size = 1;
    (*cx_ctx)->programIDs = calloc(1, sizeof(GLuint));
    (*cx_ctx)->ProgramID_count = 0;
    (*cx_ctx)->ProgramID_size = 1;
    (*cx_ctx)->threads = calloc(1, sizeof(CX_ThreadGroup));

    if (cx_glinit(&(*cx_ctx)->window)) {
        return -1;
    }

static int
cx_glrun(CX_GL_CTX *ctx) {
    // Remainder from cursor experiments, might be useful later
    double xpos, ypos;
    glfwGetCursorPos(ctx->window, &xpos, &ypos);

    do {
        // Skip render step if context is locked.
        if (!ctx->master_lock) {
            cx_glrender(ctx->window, ctx->programIDs[0], ctx->mr);
    if (cx_nninit(&(*cx_ctx)->nn)) {
        return -1;
    }
        usleep(1000000/60);
        // Check if the ESC key was pressed or the window was closed
    } while(glfwGetKey(ctx->window, GLFW_KEY_ESCAPE) != GLFW_PRESS
            && !glfwWindowShouldClose(ctx->window));

    // Close OpenGL window and terminate GLFW
    glfwTerminate();

    return 0;
}

static int
cx_nnrun(CX_Thread *self) {
cx_glrun() {

    return 0;
}

static int
cx_nnrun(Neural_Network *nn) {

    // Establish a neural interface.
    float *input_buffer = malloc(64*sizeof(float));
    float *output_buffer;
    CX_NN_CTX *ctx = self->ctx;

    output_buffer = neural_process(ctx->nn, ctx->input_buffer);

    ctx->output_buffer = output_buffer;
    output_buffer = neural_process(nn, input_buffer);
    return 0;
}

static void *
cx_glthread(void *self) {
    CX_Thread *self_t = self;
    CX_GL_CTX *gl_ctx = self_t->ctx;

    cx_glinit(&gl_ctx);

    if (cx_loadShaders(gl_ctx->VertexArrayIDs, gl_ctx->programIDs)) {
        return NULL;
    }

    cx_glrun(gl_ctx);

    return NULL;
}

static void *
cx_nnthread(void *self) {
    CX_Thread *self_t = self;
    CX_NN_CTX *nn_ctx = self_t->ctx;
    float *input, *output;
    char *export;

    cx_nninit(&nn_ctx->nn);
    input = neural_loadData(nn_ctx->nn, "../training_data/0");

    output = neural_process(nn_ctx->nn, input);

    export = neural_getXML(nn_ctx->nn);

    return export;
}

int
cx_run(CX_Context *ctx) {
    CX_ThreadGroup *tg[2];
    void *neural_xml;
cx_run(CX_Context *cx_ctx) {
    ModelRegistry *mr;

    if (cx_loadShaders(cx_ctx->VertexArrayIDs, cx_ctx->programIDs)) {
        return -1;
    }

    // Establish a model registry
    ctx->gl_ctx->mr = modelRegistry_new();
    ctx->gl_ctx->master_lock = 1;
    mr = modelRegistry_new();
    // Fill the model registry with mesh models
    neural_getMesh(cx_ctx->nn, mr);

    tg[0] = cx_threadGroup_new(&cx_glthread, ctx->gl_ctx);

    tg[1] = cx_threadGroup_new(&cx_nnthread, ctx->nn_ctx);

    pthread_join(tg[1]->group_manager->thread, &neural_xml);

    ctx->gl_ctx->master_lock = 0;

    neural_getMesh(ctx->nn_ctx->nn, ctx->gl_ctx->mr);
    // Remainder from cursor experiments, might be useful later
    double xpos, ypos;
    glfwGetCursorPos(cx_ctx->window, &xpos, &ypos);

    pthread_join(tg[0]->group_manager->thread, NULL);
    do {
        cx_glrender(cx_ctx->window, cx_ctx->programIDs[0], mr);
        usleep(1000000/60);
        // Check if the ESC key was pressed or the window was closed
    } while(glfwGetKey(cx_ctx->window, GLFW_KEY_ESCAPE) != GLFW_PRESS
            && !glfwWindowShouldClose(cx_ctx->window));

    cx_threadGroup_free(tg[0]);
    cx_threadGroup_free(tg[1]);

    free(ctx->threads);
    free(ctx);
    free(neural_xml);
    // Close OpenGL window and terminate GLFW
    glfwTerminate();
    modelRegistry_free(mr);

    return 0;
}

@@ -1,64 +0,0 @@
#include <cx.h>

CX_Thread *
cx_thread_new(void *(*target)(void *),
              void *ctx) {
    CX_Thread *self;
    int err;

    self = malloc(sizeof(CX_Thread));
    if (!self) {
        goto err;
    }
    self->ctx = ctx;
    err = pthread_create(&self->thread, NULL, target, self);
    if (err) {
        goto err;
    }

    return self;

err:
    free(self);
    return NULL;
}

void
cx_thread_free(CX_Thread *self) {
    if (self) {
        /* TODO */
        /* This is naive in its current form and will shatter
         * sooner or later.
         * Fix the context structures so that this call
         * is guaranteed not to touch invalid memory.
         */
        ((CX_GL_CTX *)self->ctx)->free(self->ctx);
    }
    free(self);
}

CX_ThreadGroup *
cx_threadGroup_new(void *(*target)(void *),
                   void *ctx) {
    CX_ThreadGroup *self;

    self = malloc(sizeof(CX_ThreadGroup));

    self->workers = malloc(8 * sizeof(CX_Thread *));
    self->worker_count = 0;
    self->worker_size = 8;

    self->group_manager = cx_thread_new(target, ctx);

    return self;
}

void
cx_threadGroup_free(CX_ThreadGroup *self) {
    if (self) {
        cx_thread_free(self->group_manager);
        free(self->workers);
    }
    free(self);
}

@@ -4,8 +4,10 @@ int
main(void) {
    // CX context (Window, neural network, threads.)
    CX_Context *cx_ctx;

    int retval;

    if (cx_init(&cx_ctx)) {
        return -1;
    }

@@ -92,7 +92,6 @@ model_applyTransformations(Model *self) {
    if (!self->transformation_count) {
        retval = malloc(self->bufsize * 4 * sizeof(GLfloat));
        memcpy(retval, self->object, self->bufsize * 4 * sizeof(GLfloat));
        tensor_free(temp_buffer[1]);
        return retval;
    }

@@ -110,7 +109,6 @@ model_applyTransformations(Model *self) {
                ->data[j*temp_buffer[(i+1)%2]->width+k];
        }
    }
    tensor_free(temp_buffer[(i+1)%2]);
    return retval;

}

@@ -129,7 +127,7 @@ model_colorFromPosition(Model *self) {
    }
}

void model_colorXYZ(Model *self, float R, float G, float B) {
void model_colorXYZ(Model *self, int R, int G, int B) {
    for (int i = 0; i < self->bufsize; i++) {
        for (int j = 0; j < 4; j++) {
            switch(j) {

src/neural.c (232 changed lines)
@@ -17,12 +17,7 @@ nl_new(size_t layer_size, size_t layer_size_next) {

static void
nl_free(Neural_Layer *self) {
    if (self) {
        for (int i = 0; i < self->layer_size; i++) {
            free(self->neurons[i].synapses);
        }
        free(self->neurons);
    }
    free(self);
}

@@ -44,8 +39,8 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {

    // Calculate sizes of individual layers and allocate them.
    for (int i = 0; i < layer_count; i++) {
        self->layers[i] = nl_new(input_size + (layer_diff * i
                                               / ((ssize_t)layer_count-1)),
        self->layers[i] = nl_new(input_size
                                 + (layer_diff * i / ((ssize_t)layer_count-1)),

                                 i < (layer_count-1) ?
                                 (input_size + (layer_diff * (i+1)

@@ -56,23 +51,13 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {
    return self;
}

void
neural_free(Neural_Network *self) {
    if (self) {
        for (int i = 0; i < self->layer_count; i++) {
            nl_free(self->layers[i]);
        }
        free(self->layers);
    }
    free(self);
}

void
neural_randomize(Neural_Network *self) {
    FILE *f;
    Neural_Layer *nl;
    uint64_t *rand_vals;

    f = fopen("/dev/urandom", "r");

    for (int i = 0; i < self->layer_count; i++) {

@@ -82,7 +67,7 @@ neural_randomize(Neural_Network *self) {
            fread(rand_vals, sizeof(uint64_t),
                  nl->layer_size_next, f);
            for (int k = 0; k < nl->layer_size_next; k++) {
                nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX / nl->layer_size;
                nl->neurons[j].synapses[k] = (float)rand_vals[k] / UINT64_MAX;
            }
            free(rand_vals);
        }

@@ -120,7 +105,6 @@ neural_loadData(Neural_Network *self, const char *filename) {
            return NULL;
            break;
        }
        read_cursor++;
    }
    return retval;
}

@@ -134,10 +118,10 @@ neural_process(Neural_Network *self, float *input) {
    for (int i = 0; i < self->layers[0]->layer_size; i++) {
        nl->neurons[i].value = input[i];
    }
    neural_vector = tensor_new(1, nl->layer_size);
    for (int i = 0; i < self->layer_count; i++) {
        neural_vector = tensor_new(nl->layer_size, 1, 0);
        nl = self->layers[i];
        synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size, 0);
        synapse_matrix = tensor_new(nl->layer_size_next, nl->layer_size);
        for (int j = 0; j < nl->layer_size; j++) {
            neural_vector->data[j] = nl->neurons[j].value;
            for (int k = 0; k < nl->layer_size_next; k++) {

@@ -146,16 +130,9 @@ neural_process(Neural_Network *self, float *input) {
        }

        temp_buffer = tensor_multip(synapse_matrix, neural_vector);
        neural_vector = temp_buffer;
        if (nl->layer_size_next) {
            Neural_Layer *nl_next = self->layers[i+1];
            for (int j = 0; j < nl_next->layer_size; j++) {
                nl_next->neurons[j].value = neural_vector->data[j];
            }

        }
        tensor_free(neural_vector);
        tensor_free(synapse_matrix);
        neural_vector = temp_buffer;
    }

    retval = malloc(nl->layer_size * sizeof(float));

@@ -166,81 +143,48 @@ neural_process(Neural_Network *self, float *input) {
    return retval;
}

// These two will be merged into one once I have
// enough patience to create more dynamic objects.
static void *
neural_backpropagation(Neural_Network *self, int neuron, int layer, float ratio) {
    Neural_Layer *nl;
    Neural_Data *nd;
    float *ratios;
    int *neurons;
    float *synapses;

    for (int i = layer-1; i >= 0; i--) {
        nl = self->layers[i];
        for (int j = 0; j < nl->layer_size; j++) {
            synapses = nl->neurons[j].synapses;
            for (int k = 0; k < nl->layer_size_next; i++) {
                synapses[k] = 0;
            }
        }

neural_backprop_up(Neural_Network *self, size_t neuron, size_t layer) {
    return NULL;
}

static void *
neural_backprop_down(Neural_Network *self, size_t neuron, size_t layer) {
    return NULL;
}

int
neural_train(Neural_Network *self,
             const char *input_path,
             const float *expected_result) {
    Neural_Data *input_data; // What the neural network received
    Neural_Data *result_data; // What the neural network computed
    float backprop_ratio;

    for (int i = self->layer_count-1; i >= 0; i--) {
        Neural_Layer *nl = self->layers[i];
        result_data = neural_getData(self, i);

        for (int j = nl->layer_size-1; j >= 0; j--) {
            backprop_ratio = nl->neurons[i].value / expected_result[i];
            neural_backpropagation(self, j, i, backprop_ratio);
        }
    }
    input_data = neural_getData(self, 0);
    result_data = neural_getData(self, self->layer_count-1);

    return 0;
}

Neural_Data *
neural_data_new(int layer_size, int layer_size_next) {
    Neural_Data *self;

    self = calloc(1, sizeof(Neural_Data));
    self->neural_vector = malloc(layer_size * sizeof(float));
    self->vect_len = layer_size;

    if (layer_size_next) {
        self->synapse_matrix = malloc(layer_size * layer_size_next
                                      * sizeof(float));
        self->mat_len = layer_size_next;
    }
    return self;
}

Neural_Data *
neural_getData(Neural_Network *self, size_t layer) {
    Neural_Layer *nl;
    Neural_Data *retval;

    retval = malloc(1 * sizeof(Neural_Data));

    nl = self->layers[layer];

    retval = neural_data_new(nl->layer_size, nl->layer_size_next);

    retval->neural_vector = malloc(nl->layer_size * sizeof(float));
    retval->vect_len = nl->layer_size;
    if (!nl->layer_size_next) {
        retval->synapse_matrix = NULL;
        retval->mat_len = 0;
    }
    else {
        retval->synapse_matrix = malloc(nl->layer_size * nl->layer_size_next
                                        * sizeof(float));
        for (int i = 0; i < nl->layer_size; i++) {
            for (int j = 0; j < nl->layer_size_next; j++) {
                retval->synapse_matrix[i*j+i] = nl->neurons[i].synapses[j];

@@ -261,7 +205,7 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
    for (int j = 0; j < nn->layer_count; j++) {
        Neural_Layer *nl = nn->layers[j];
        for (int i = 0; i < nl->layer_size; i++) {
            float brightness;
            unsigned int brightness;
            for (int k = 0; k < nl->layer_size_next; k++) {
                model = model_line((-.90)
                                   + ((GLfloat)2 * i * .90/(nl->layer_size-1)),

@@ -275,7 +219,7 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {

                                   .001 // girth
                                   );
                brightness = nl->neurons[i].synapses[k];
                brightness = nl->neurons[i].synapses[k] * 255;
                if (brightness) {
                    model_colorXYZ(model, brightness, 0, 0);
                }

@@ -283,19 +227,16 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
            }

            model = model_circle(0, (GLfloat)1/64);
            brightness = nl->neurons[i].value <= 1.0 ?
                         nl->neurons[i].value : 1.0;
            brightness = nl->neurons[i].value <= 1.0 ? nl->neurons[i].value : 255;
            model_colorXYZ(model, 0, brightness, 0);
            Tensor *translation_matrix = tensor_new(4, 4, 1);
            Tensor *aspectRatio_matrix = tensor_new(4, 4, 1);
            Tensor *translation_matrix = tensor_new(4, 4);
            Tensor *aspectRatio_matrix = tensor_new(4, 4);
            aspectRatio_matrix->data[0] = (GLfloat)9/16;

            translation_matrix->data[3] = (((GLfloat)-1*16/9)*.90)
                                          + ((GLfloat)1/(nl->layer_size-1)
                                             * 2 * i * (((GLfloat)16/9))*.90);
                                          + ((GLfloat)1/(nl->layer_size-1)*2 * i * (((GLfloat)16/9))*.90);

            translation_matrix->data[7] = .90 - ((GLfloat)1/(nn->layer_count)
                                                 * 2 * j *.90);
            translation_matrix->data[7] = .90 - ((GLfloat)1/(nn->layer_count)*2 * j *.90);

            model->transformations[0] = translation_matrix;
            model->transformations[1] = aspectRatio_matrix;

@@ -309,122 +250,3 @@ neural_getMesh(Neural_Network *nn, ModelRegistry *mr) {
    return 0;
}

static char*
indented_line(char *str, const char *line, int *indent) {
    for (int m = 0; m < *indent; m++)
        str = strcat(str, " ");
    str = strcat(str, line);

    return str;

}

static char*
indented_tag(char *str, const char *tag, int *indent) {
    if (tag[1] == '/') {
        *indent -= 4;
    }

    indented_line(str, tag, indent);

    if (tag[1] != '/') {
        *indent += 4;
    }

    return str;
}

// TODO
/* This XML implementation has potential bugs and has not
 * been checked very thoroughly, fix, please.
 */
char *
neural_getXML(Neural_Network *nn) {
    char *retval;
    const char *to_write;
    int volume = 0;
    int indent = 0;

    retval = malloc(0xff * sizeof(char));

    to_write = "<?xml version=\"1.0\"?>\n\n";
    retval = strcpy(retval, to_write);
    to_write = "<Network>\n";
    retval = indented_tag(retval, to_write, &indent);

    for (int i = 0; i < nn->layer_count; i++) {
        Neural_Layer *nl;
        Neural_Data *nd;
        char *line_prep;

        nl = nn->layers[i];
        nd = neural_getData(nn, i);

        retval = realloc(retval, strlen(retval)
                         + (nl->layer_size * 32 * nl->layer_size_next) // Matrix
                         + (nl->layer_size * 32) // Vector
                         + 0x3ff * nl->layer_size // Expected tag garbage.
                         + indent); // Space waster

        to_write = "<Layer>\n";
        retval = indented_tag(retval, to_write, &indent);

        to_write = "<Synapse_Matrix>\n";
        retval = indented_tag(retval, to_write, &indent);
        for (int j = 0; j < nd->mat_len; j++) {
            char number_buffer[32];
            line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
                               + (nl->layer_size * 32));
            *line_prep = '\0';

            line_prep = strcat(line_prep, "[ ");
            for (int k = 0; k < nd->vect_len; k++) {

                strfromf(number_buffer, 32, "%.2f ", nd->synapse_matrix[k+j*nd->mat_len]);
                line_prep = strcat(line_prep, number_buffer);
                if (k < nd->vect_len - 1) {
                    line_prep = strcat(line_prep, ", ");
                }

            }
            line_prep = strcat(line_prep, " ]\n");
            retval = indented_line(retval, line_prep, &indent);
            free(line_prep);
        }
        to_write = "</Synapse_Matrix>\n";
        retval = indented_tag(retval, to_write, &indent);

        to_write = "<Neural_Vector>\n";
        retval = indented_tag(retval, to_write, &indent);
        char number_buffer[32];
        line_prep = malloc((nl->layer_size * 32 * nl->layer_size_next) // Matrix
                           + (nl->layer_size * 32));
        *line_prep = '\0';
        line_prep = strcat(line_prep, "[ ");

        for (int k = 0; k < nd->vect_len; k++) {
            strfromf(number_buffer, 32, "%.4f", nd->neural_vector[k]);
            line_prep = strcat(line_prep, number_buffer);

            if (k < nd->vect_len - 1) {
                line_prep = strcat(line_prep, ", ");
            }

        }
        line_prep = strcat(line_prep, " ]\n");
        retval = indented_line(retval, line_prep, &indent);
        free(line_prep);
        to_write = "</Neural_Vector>\n";
        retval = indented_tag(retval, to_write, &indent);

        to_write = "</Layer>\n";
        retval = indented_tag(retval, to_write, &indent);
    }
    to_write = "</Network>\n";
    retval = indented_tag(retval, to_write, &indent);

    return retval;
}

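For orientation, the per-layer step that neural_process() assembles from tensor_new and tensor_multip amounts to a matrix-vector product: each value in the next layer is the sum of the current layer's values weighted by their synapses (no activation function is applied in the code above). Below is a stand-alone sketch with plain arrays, assuming tensor_multip performs a standard matrix-vector product; all names are illustrative.

    #include <stdio.h>
    #include <stddef.h>

    /* One layer transition written with plain arrays:
     * next[k] = sum over j of synapses[j*layer_size_next + k] * current[j],
     * where synapses[j*layer_size_next + k] plays the role of neurons[j].synapses[k]. */
    static void propagate_layer(const float *current, size_t layer_size,
                                float *next, size_t layer_size_next,
                                const float *synapses) {
        for (size_t k = 0; k < layer_size_next; k++) {
            float acc = 0.0f;
            for (size_t j = 0; j < layer_size; j++)
                acc += synapses[j * layer_size_next + k] * current[j];
            next[k] = acc;
        }
    }

    int main(void) {
        /* Two input neurons feeding two output neurons. */
        const float input[2]    = { 1.0f, 0.5f };
        const float synapses[4] = { 0.2f, 0.8f,    /* synapses of neuron 0 */
                                    0.4f, 0.6f };  /* synapses of neuron 1 */
        float output[2];

        propagate_layer(input, 2, output, 2, synapses);
        printf("%.2f %.2f\n", output[0], output[1]);   /* prints 0.40 1.10 */
        return 0;
    }
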
src/tensor.c (10 changed lines)
@@ -1,7 +1,7 @@
#include "cx.h"

Tensor *
tensor_new(size_t len, size_t width, int is_identity) {
tensor_new(size_t len, size_t width) {
    Tensor *mat;

    mat = malloc(1 * sizeof(Tensor));

@@ -10,10 +10,6 @@ tensor_new(size_t len, size_t width, int is_identity) {
    mat->len = len;
    mat->width = width;

    if (!is_identity) {
        return mat;
    }

    for (int i = 0; i < len; i++) {
        mat->data[i*width+(i % width)] = 1;
    }

@@ -28,7 +24,7 @@ tensor_fromVertexBuffer(float *buffer, size_t bufsize) {

    mat_width = bufsize;

    mat = tensor_new(4, mat_width, 0);
    mat = tensor_new(4, mat_width);

    for (int i = 0; i < bufsize; i++) {
        for (int j = 0; j < 4; j++) {

@@ -48,7 +44,7 @@ tensor_multip(Tensor *mat2, Tensor *mat1) {
    Tensor *result;
    float dot_prod;

    result = tensor_new(mat2->len, mat1->width, 0);
    result = tensor_new(mat2->len, mat1->width);

    for (int i = 0; i < mat1->width; i++) {

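Since the is_identity parameter is gone, callers that previously requested an identity matrix, as neural_getMesh() did with tensor_new(4, 4, 1), now have to set the diagonal themselves. Below is a caller-side sketch against the new two-argument signature; the helper name is made up, and it clears the buffer explicitly rather than assuming tensor_new zeroes it.

    /* Hypothetical helper replacing the old tensor_new(len, width, 1) call:
     * build a square tensor and set its main diagonal to 1. */
    static Tensor *tensor_new_identity(size_t n) {
        Tensor *mat = tensor_new(n, n);
        if (!mat)
            return NULL;
        for (size_t i = 0; i < n * n; i++)
            mat->data[i] = 0.0f;          /* clear, in case the buffer is not zeroed */
        for (size_t i = 0; i < n; i++)
            mat->data[i * n + i] = 1.0f;  /* same diagonal the removed loop wrote */
        return mat;
    }

Where the old code wrote Tensor *translation_matrix = tensor_new(4, 4, 1);, the equivalent under the new API would be tensor_new_identity(4) followed by filling in the translation entries.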