diff --git a/include/cx.h b/include/cx.h
index 96579c0..6d2cfdf 100644
--- a/include/cx.h
+++ b/include/cx.h
@@ -25,11 +25,11 @@
 
 
 // Declare functions
 
-int cx_glinit(GLFWwindow **);
-int cx_glrun(GLFWwindow *);
+int cx_glinit(GLFWwindow **);
 
 int cx_nninit(Neural_Network **);
-int cx_nnrun(Neural_Network *);
+
+int cx_run(GLFWwindow *, Neural_Network *);
 
 #endif
diff --git a/include/neural.h b/include/neural.h
index 9c43bb2..ab406ed 100644
--- a/include/neural.h
+++ b/include/neural.h
@@ -3,8 +3,8 @@
 
 typedef struct _neuron {
     float value;
-    float *weights;     // Biases of the neuron towards the next layer,
-                        // NULL if output layer
+    float *synapses;    // Synapses of the neuron towards the next layer,
+                        // NULL if output layer
 } Neuron;
 
 typedef struct _neural_layer {
diff --git a/src/cx.c b/src/cx.c
index 599e41e..ca1af3b 100644
--- a/src/cx.c
+++ b/src/cx.c
@@ -97,7 +97,7 @@ cx_glinit(GLFWwindow **window) {
     glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
 
     // Open a window and create its OpenGL context
-    *window = glfwCreateWindow(1920, 1080, "CONTROL-X", NULL, NULL);
+    *window = glfwCreateWindow(1280, 720, "CONTROL-X", NULL, NULL);
     if (*window == NULL) {
         fprintf(stderr, "Failed to open GLFW window.\n");
         glfwTerminate();
@@ -122,8 +122,19 @@ cx_glinit(GLFWwindow **window) {
     return 0;
 }
 
+static int
+cx_nnrun(Neural_Network *nn) {
+
+    // Establish a neural interface.
+    float *input_buffer = malloc(64*sizeof(float));
+    float *output_buffer;
+
+    output_buffer = neural_process(nn, input_buffer);
+    return 0;
+}
+
 int
-cx_glrun(GLFWwindow *window) {
+cx_run(GLFWwindow *window, Neural_Network *nn) {
     Model *model;
     ModelRegistry *mr;
     Model *neural_network_model;
@@ -137,19 +148,36 @@ cx_glrun(GLFWwindow *window) {
     // Establish a model registry
     mr = modelRegistry_new();
 
     // Fill the model registry with mesh models
-    for (int j = 0; j < 8; j++) {
-        for (int i = 0; i < 64; i++) {
+    for (int j = 0; j < nn->layer_count; j++) {
+        Neural_Layer *nl = nn->layers[j];
+        for (int i = 0; i < nl->layer_size; i++) {
             // Load model to render from file
             //Model *model = model_load("../3d_assets/triangle.obj");
+            for (int k = 0; k < nl->layer_size_next; k++) {
+                model = model_line((-.90)
+                    + ((GLfloat)2 * i * .90/(nl->layer_size-1)),
+
+                    .90 - ((GLfloat)2 * j *.90/(nn->layer_count)),
+
+                    (-.90)
+                    + ((GLfloat)2 * k * .90/(nl->layer_size_next-1)),
+
+                    .90 - ((GLfloat)2 * (j+1) *.90/(nn->layer_count)),
+
+                    .001 // girth
+                );
+                modelRegistry_register(mr, model);
+            }
+
             model = model_circle(0, (GLfloat)1/64);
             GLfloat *translation_matrix = matrix_new();
             GLfloat *aspectRatio_matrix = matrix_new();
             aspectRatio_matrix[0] = (GLfloat)9/16;
             translation_matrix[3] = (((GLfloat)-1*16/9)*.90)
-                + ((GLfloat)1/32 * i * (((GLfloat)16/9))*.90);
+                + ((GLfloat)1/(nl->layer_size-1)*2 * i * (((GLfloat)16/9))*.90);
 
-            translation_matrix[7] = .90 - ((GLfloat)1/4 * j *.90);
+            translation_matrix[7] = .90 - ((GLfloat)1/(nn->layer_count)*2 * j *.90);
 
             model->transformations[0] = translation_matrix;
             model->transformations[1] = aspectRatio_matrix;
@@ -157,11 +185,9 @@ cx_glrun(GLFWwindow *window) {
 
             model_colorWhite(model);
             modelRegistry_register(mr, model);
+        }
     }
 
-    model = model_line(-0.5, 0, 0.5, 0, 0.015);
-    modelRegistry_register(mr, model);
-
 
     // Remainder from cursor experiments, might be useful later
     double xpos, ypos;
@@ -199,14 +225,4 @@ cx_nninit(Neural_Network **nn) {
 
     return 0;
 }
 
-int
-cx_nnrun(Neural_Network *nn) {
-
-    // Establish a neural interface.
-    float *input_buffer = malloc(64*sizeof(float));
-    float *output_buffer;
-
-    output_buffer = neural_process(nn, input_buffer);
-    return 0;
-}
diff --git a/src/main.c b/src/main.c
index 29f1d7d..0157e1a 100644
--- a/src/main.c
+++ b/src/main.c
@@ -21,6 +21,6 @@ main(void) {
         return -1;
     }
 
-    retval = cx_glrun(window);
+    retval = cx_run(window, nn);
     return retval;
 }
diff --git a/src/model.c b/src/model.c
index 81f8280..eebe9e1 100644
--- a/src/model.c
+++ b/src/model.c
@@ -67,9 +67,7 @@ model_load(const char *path) {
         for (int j = 0; j < 3; j++) {
             for (int k = 0; k < 3; k++) {
                 self->object[i*12+j*4+k] = vertices[(faces[i*3+j]-1)*3+k];
-                printf("%f, ", vertices[(faces[i*3+j]-1)*3+k]);
             }
-            printf("\n");
             self->object[i*12+j*4+3] = 1;
         }
     }
@@ -201,9 +199,6 @@ model_line(float x1, float y1, float x2, float y2, float girth) {
         +(cos(M_PI/2) * y_diff))
         / line_length * girth / 2;
 
-    printf("%f, ", normal_x);
-    printf("%f,\n", normal_y);
-
     self = model_new(6);
 
     if (self == NULL) {
@@ -213,38 +208,27 @@ model_line(float x1, float y1, float x2, float y2, float girth) {
 
     self->object[0] = x1 + normal_x;
     self->object[1] = y1 + normal_y;
     self->object[3] = 1;
-    printf("%f, ", self->object[0]);
-    printf("%f,\n", self->object[1]);
 
     self->object[4] = x1 - normal_x;
     self->object[5] = y1 - normal_y;
     self->object[7] = 1;
-    printf("%f, ", self->object[4]);
-    printf("%f,\n", self->object[5]);
 
     self->object[8] = x2 + normal_x;
     self->object[9] = y2 + normal_y;
     self->object[11] = 1;
-    printf("%f, ", self->object[8]);
-    printf("%f,\n", self->object[9]);
 
     self->object[12] = x1 - normal_x;
     self->object[13] = y1 - normal_y;
     self->object[15] = 1;
-    printf("%f, ", self->object[12]);
-    printf("%f,\n", self->object[13]);
 
     self->object[16] = x2 + normal_x;
     self->object[17] = y2 + normal_y;
     self->object[19] = 1;
-    printf("%f, ", self->object[16]);
-    printf("%f,\n", self->object[17]);
 
     self->object[20] = x2 - normal_x;
     self->object[21] = y2 - normal_y;
     self->object[23] = 1;
-    printf("%f, ", self->object[20]);
-    printf("%f,\n", self->object[21]);
+
     return self;
 }
diff --git a/src/neural.c b/src/neural.c
index 5a2f2a6..0bde71e 100644
--- a/src/neural.c
+++ b/src/neural.c
@@ -7,7 +7,7 @@ nl_new(size_t layer_size, size_t layer_size_next) {
     self->neurons = calloc(layer_size, sizeof(Neuron));
 
     for (int i = 0; i < layer_size; i++) {
-        self->neurons[i].weights = calloc(layer_size_next, sizeof(float));
+        self->neurons[i].synapses = calloc(layer_size_next, sizeof(float));
     }
 
     self->layer_size = layer_size;
@@ -31,17 +31,21 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {
     // The difference between layer sizes, hidden layers step between the two
     // sizes in linear fashion.
     ssize_t layer_diff;
+    ssize_t layer_step;
+
     self->layer_count = layer_count;
     self->layers = malloc(layer_count * sizeof(Neural_Layer *));
     layer_diff = (ssize_t) output_size - input_size;
 
     // Calculate sizes of individual layers and allocate them.
     for (int i = 0; i < layer_count; i++) {
         self->layers[i] = nl_new(input_size
-            + (layer_diff / ((ssize_t)layer_count-(i))),
-            input_size +
-            (layer_diff / ((ssize_t)layer_count-(i+1)))
-            ? i < i-1 : 0);
+            + (layer_diff * i / ((ssize_t)layer_count-1)),
+
+            i < (layer_count-1) ?
+                (input_size + (layer_diff * (i+1) / ((ssize_t)layer_count-1)))
+                : 0);
     }
 
     return self;
@@ -57,7 +61,7 @@ neural_randomize(Neural_Network *self) {
     for (int i = 0; i < self->layer_count; i++) {
         nl = self->layers[i];
         for (int j = 0; j < nl->layer_size; j++) {
-            fread(nl->neurons[j].weights, sizeof(float), nl->layer_size_next, f);
+            fread(nl->neurons[j].synapses, sizeof(float), nl->layer_size_next, f);
        }
     }
 }
@@ -76,7 +80,7 @@ neural_process(Neural_Network *self, float *input) {
 
         for (int j = 0; j < nl->layer_size; j++) {
             // MATH GOES BRRRRRRRR
             dot_prod += nl->neurons[j].value
-                * nl->neurons[j].weights[j];
+                * nl->neurons[j].synapses[j];
         }
     }
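
Note on the neural_new() hunk: the replaced expression stepped between layer sizes incorrectly (the old "? i < i-1 : 0" ternary was applied to the wrong operand), while the new call interpolates layer widths linearly from input_size to output_size and passes 0 as the next-layer size for the final layer. A minimal standalone sketch of the same arithmetic; the helper name layer_width_at and the example sizes are illustrative only, not part of the patch:

    #include <stdio.h>
    #include <sys/types.h>

    /* Illustrative helper (not in the patch): layer i is sized
     * input_size + layer_diff * i / (layer_count - 1), the same linear
     * interpolation neural_new() now feeds to nl_new().
     * Assumes layer_count > 1. */
    static size_t
    layer_width_at(size_t input_size, size_t output_size,
                   size_t layer_count, int i) {
        ssize_t layer_diff = (ssize_t)output_size - (ssize_t)input_size;
        return (size_t)((ssize_t)input_size
            + layer_diff * i / ((ssize_t)layer_count - 1));
    }

    int
    main(void) {
        // Example: 64 inputs, 4 outputs, 5 layers -> 64, 49, 34, 19, 4
        for (int i = 0; i < 5; i++)
            printf("layer %d: %zu neurons\n", i, layer_width_at(64, 4, 5, i));
        return 0;
    }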
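Note on the cx_run() hunk: neuron i of layer j is laid out in normalized device coordinates at x = -0.90 + 2*i*0.90/(layer_size-1) and y = 0.90 - 2*j*0.90/layer_count, and a thin model_line() is drawn from it to every neuron k of layer j+1 (the circle's translation matrix uses the same fractions, pre-scaled by the 16/9 aspect correction). A sketch of that coordinate mapping; the helper names are illustrative and assume layer_size > 1:

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative helpers (not in the patch): the endpoint arithmetic
     * cx_run() passes to model_line() for neuron i of layer j. */
    static float
    neuron_x(int i, size_t layer_size) {
        return -0.90f + 2.0f * i * 0.90f / (layer_size - 1);
    }

    static float
    neuron_y(int j, size_t layer_count) {
        return 0.90f - 2.0f * j * 0.90f / layer_count;
    }

    int
    main(void) {
        // Example: a 4-neuron layer 0 out of 3 layers spans x in [-0.90, 0.90]
        for (int i = 0; i < 4; i++)
            printf("neuron %d: (%.2f, %.2f)\n", i,
                   neuron_x(i, 4), neuron_y(0, 3));
        return 0;
    }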