Implement basis for thread management.

Marcel Plch 2024-11-07 22:07:33 +01:00
parent 594b6ef722
commit 0b4cc27331
Signed by: dormouse
GPG key ID: 2CA77596BC4BDFFE
7 changed files with 38 additions and 21 deletions

View file

@@ -1,5 +1,5 @@
 # CMake entry point
-cmake_minimum_required (VERSION 3.27.1)
+cmake_minimum_required (VERSION 3.30.5)
 project(CX C)
 cmake_policy(SET CMP0072 NEW)
@@ -17,6 +17,7 @@ set(ALL_LIBS
 	glfw
 	GLEW
 	m
+	pthread
 )

 set(CMAKE_C_FLAGS "-O0 -ggdb -Wall")

View file

@ -9,6 +9,7 @@
#include <time.h> #include <time.h>
#include <unistd.h> #include <unistd.h>
#include <stdint.h> #include <stdint.h>
#include <pthread.h>
// Include GLEW // Include GLEW
#include <GL/glew.h> #include <GL/glew.h>
@@ -24,13 +25,27 @@
 #include <shader.h>
 #include <neural.h>

+// Declare common data structures.
+typedef struct _cx_thrgr {
+	pthread_t *group_manager;
+	pthread_t *workers;
+} CX_ThreadGroup;
+
+typedef struct _cx_ctx {
+	GLFWwindow *window;
+	Neural_Network *nn;
+	CX_ThreadGroup **threads;
+} CX_Context;
+
 // Declare functions
+CX_Context *cx_context_new(void);
 int cx_glinit(GLFWwindow **);
 int cx_nninit(Neural_Network **);
-int cx_run(GLFWwindow *, Neural_Network *);
+int cx_run(CX_Context *);

 #endif

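Note: cx_context_new() is only declared in this hunk; its definition does not appear anywhere in the commit. Below is a minimal sketch of what it could look like, assuming it merely zero-allocates the context and leaves window, nn and threads to be filled in by cx_glinit(), cx_nninit() and future thread code. This is a sketch under those assumptions, not the repository's implementation.

	/* Sketch only -- not code from this commit. Relies on the
	 * CX_Context definition added to the header above. */
	#include <stdlib.h>

	CX_Context *
	cx_context_new(void) {
		CX_Context *ctx;

		ctx = calloc(1, sizeof(CX_Context));
		if (ctx == NULL)
			return NULL;	/* allocation failed */

		/* window, nn and threads stay NULL until their init code runs. */
		return ctx;
	}
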
View file

@@ -156,7 +156,7 @@ cx_nnrun(Neural_Network *nn) {
 }

 int
-cx_run(GLFWwindow *window, Neural_Network *nn) {
+cx_run(CX_Context *cx_ctx) {
	ModelRegistry *mr;
	GLuint VertexArrayID;
	GLuint programID;
@@ -168,19 +168,19 @@ cx_run(GLFWwindow *window, Neural_Network *nn) {
 	// Establish a model registry
 	mr = modelRegistry_new();

 	// Fill the model registry with mesh models
-	neural_getMesh(nn, mr);
+	neural_getMesh(cx_ctx->nn, mr);

 	// Remainder from cursor experiments, might be useful later
 	double xpos, ypos;
-	glfwGetCursorPos(window, &xpos, &ypos);
+	glfwGetCursorPos(cx_ctx->window, &xpos, &ypos);

 	do {
-		cx_glrender(window, programID, mr);
+		cx_glrender(cx_ctx->window, programID, mr);
 		usleep(1000000/60);
 		// Check if the ESC key was pressed or the window was closed
-	} while(glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS
-		&& !glfwWindowShouldClose(window));
+	} while(glfwGetKey(cx_ctx->window, GLFW_KEY_ESCAPE) != GLFW_PRESS
+		&& !glfwWindowShouldClose(cx_ctx->window));

 	// Close OpenGL window and terminate GLFW
 	glfwTerminate();

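Note: the commit adds the CX_ThreadGroup type but no code that starts any threads yet. The sketch below shows one way that basis could later be built on, using only standard pthreads calls that the new pthread.h include and CMake link flag make available. The constructor name cx_threadGroup_new, the worker count and the manager/worker entry points are all hypothetical, not names taken from the repository.

	/* Sketch only -- not code from this commit. */
	#include <pthread.h>
	#include <stdlib.h>

	#define CX_WORKER_COUNT 4	/* hypothetical pool size */

	CX_ThreadGroup *
	cx_threadGroup_new(void *(*manager_main)(void *),
			   void *(*worker_main)(void *), void *arg) {
		CX_ThreadGroup *tg;

		tg = calloc(1, sizeof(CX_ThreadGroup));
		if (tg == NULL)
			return NULL;
		tg->group_manager = calloc(1, sizeof(pthread_t));
		tg->workers = calloc(CX_WORKER_COUNT, sizeof(pthread_t));
		if (tg->group_manager == NULL || tg->workers == NULL)
			goto fail;

		/* One manager thread plus a fixed pool of workers. */
		if (pthread_create(tg->group_manager, NULL, manager_main, arg))
			goto fail;
		for (int i = 0; i < CX_WORKER_COUNT; i++) {
			if (pthread_create(&tg->workers[i], NULL, worker_main, arg))
				goto fail;	/* real code would also join/cancel what already started */
		}
		return tg;

	fail:
		free(tg->group_manager);
		free(tg->workers);
		free(tg);
		return NULL;
	}
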
View file

@@ -1,27 +1,23 @@
-// Include standard headers
-#include <stdio.h>
-#include <stdlib.h>
-
-// Include project headers
 #include <cx.h>

 int
 main(void) {
+	CX_Context *cx_ctx;
+
+	cx_ctx = calloc(1, sizeof(CX_Context));
 	GLFWwindow *window;
 	Neural_Network *nn;
 	int retval;

-	if (cx_glinit(&window)) {
+	if (cx_glinit(&(cx_ctx->window))) {
 		return -1;
 	}

-	if (cx_nninit(&nn)) {
+	if (cx_nninit(&(cx_ctx->nn))) {
 		return -1;
 	}

-	retval = cx_run(window, nn);
+	retval = cx_run(cx_ctx);

 	return retval;
 }

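Note: main() as committed calloc()s the context directly, without checking the result or releasing it. A slightly more defensive variant is sketched below; it assumes the cx_context_new() declared in cx.h performs the zero-allocation, and that freeing the bare context at exit is enough (deeper cleanup of window and nn is out of scope here).

	/* Sketch only -- not code from this commit. */
	#include <stdlib.h>
	#include <cx.h>

	int
	main(void) {
		CX_Context *cx_ctx;
		int retval;

		cx_ctx = cx_context_new();
		if (cx_ctx == NULL)
			return -1;	/* out of memory */

		if (cx_glinit(&(cx_ctx->window)) || cx_nninit(&(cx_ctx->nn))) {
			free(cx_ctx);
			return -1;
		}

		retval = cx_run(cx_ctx);
		free(cx_ctx);
		return retval;
	}
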
View file

@@ -28,6 +28,7 @@ neural_new(size_t input_size, size_t output_size, size_t layer_count) {
 		// Failed to allocate.
 		return NULL;
 	}
+
 	// The difference between layer sizes, hidden layers step between the two
 	// sizes in linear fashion.
 	ssize_t layer_diff;

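Note: the comment touched by this hunk describes hidden layer sizes stepping linearly from input_size to output_size. As a worked example, assuming layer_count counts every layer including input and output and that layer_diff is the per-layer step: with input_size = 16, output_size = 4 and layer_count = 4, layer_diff = (4 - 16) / (4 - 1) = -4, so the layers get 16, 12, 8 and 4 neurons. The standalone sketch below illustrates that stepping; it is not the neural.c implementation, which may round or clamp differently.

	/* Sketch only -- illustrates the linear stepping, not neural.c itself. */
	#include <stdio.h>
	#include <sys/types.h>	/* ssize_t */

	static void
	print_layer_sizes(size_t input_size, size_t output_size, size_t layer_count) {
		ssize_t layer_diff;

		layer_diff = ((ssize_t)output_size - (ssize_t)input_size)
			/ (ssize_t)(layer_count - 1);
		for (size_t i = 0; i < layer_count; i++)
			printf("layer %zu: %zd neurons\n", i,
			    (ssize_t)input_size + (ssize_t)i * layer_diff);
	}
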
View file

@@ -34,6 +34,10 @@ tensor_fromVertexBuffer(float *buffer, size_t bufsize) {
 	return mat;
 }

+Tensor *
+tensor_fromNeuralData(Neural_Data *nd) {
+}
+
 Tensor *
 tensor_multip(Tensor *mat2, Tensor *mat1) {
	Tensor *result;
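
Note: the new tensor_fromNeuralData() is committed as an empty stub, so control falls off the end of a function declared to return Tensor *; using its return value in this state would be undefined behavior. Until the conversion is written, an explicit placeholder like the sketch below (not part of the commit) avoids that.

	/* Sketch only -- placeholder body, not the eventual implementation. */
	Tensor *
	tensor_fromNeuralData(Neural_Data *nd) {
		(void)nd;	/* unused until the conversion from Neural_Data is written */
		return NULL;	/* explicit "not implemented yet" */
	}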