work-stealing-scheduler/src/sched.c


#include "../includes/sched.h"
2024-03-09 18:47:40 +01:00
2024-04-18 17:41:41 +02:00
#include <errno.h>
#include <pthread.h>
2024-03-15 12:43:44 +01:00
#include <stdio.h>
#include <stdlib.h>
2024-04-21 15:00:45 +02:00
#include <string.h>
2024-03-15 12:43:44 +01:00
struct task_info {
    void *closure;
    taskfunc f;
};

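/*
 * Shared state of the work-stealing scheduler. Each worker thread i owns
 * one fixed-size stack tasks[i] of qlen slots; top[i] is the index of the
 * last valid entry (-1 when the stack is empty) and mutex[i] protects both
 * tasks[i] and top[i].
 */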
struct scheduler {
    /* Size of each stack */
    int qlen;

    /* Mutexes protecting the stacks */
    pthread_mutex_t *mutex;

    /* Number of threads instantiated */
    int nthreads;

    /* Task stacks */
    struct task_info **tasks;

    /* List of threads */
    pthread_t *threads;

    /* Current position in each stack */
    int *top;
};

/* Shared scheduler */
static struct scheduler sched;

/* Runs the tasks of a stack */
void *sched_worker(void *);

/* Undoes what the scheduler initialisation set up */
int sched_init_cleanup(int);

/* sched_spawn on a specific thread */
int sched_spawn_core(taskfunc, void *, struct scheduler *, int);

/* Returns the index of the current thread */
int current_thread(struct scheduler *);

int
sched_init(int nthreads, int qlen, taskfunc f, void *closure)
{
    sched.mutex = NULL;
    sched.tasks = NULL;
    sched.threads = NULL;
    sched.top = NULL;

    if(qlen <= 0) {
        fprintf(stderr, "qlen must be greater than 0\n");
        return -1;
    }
    sched.qlen = qlen;

    if(nthreads < 0) {
        fprintf(stderr, "nthreads must not be negative\n");
        return -1;
    } else if(nthreads == 0) {
        nthreads = sched_default_threads();
    }
    sched.nthreads = nthreads;

    // Initialise the mutex of each thread
    if(!(sched.mutex = malloc(sched.nthreads * sizeof(pthread_mutex_t)))) {
        perror("Mutexes");
        return sched_init_cleanup(-1);
    }
    for(int i = 0; i < sched.nthreads; ++i) {
        if(pthread_mutex_init(&sched.mutex[i], NULL) != 0) {
            fprintf(stderr, "Can't init mutex for thread %d\n", i);
            return sched_init_cleanup(-1);
        }
    }

    // Initialise the cursor tracking the top of each thread's stack
    if(!(sched.top = malloc(sched.nthreads * sizeof(int)))) {
        perror("Cursor top stack");
        return sched_init_cleanup(-1);
    }
    for(int i = 0; i < sched.nthreads; ++i) {
        sched.top[i] = -1;
    }

    // Allocate memory for each thread's stack
    if(!(sched.tasks = malloc(sched.nthreads * sizeof(struct task_info *)))) {
        perror("Stack list");
        return sched_init_cleanup(-1);
    }
    for(int i = 0; i < sched.nthreads; ++i) {
        if(!(sched.tasks[i] = malloc(qlen * sizeof(struct task_info)))) {
            fprintf(stderr, "Stack for thread %d: %s\n", i, strerror(errno));
            return sched_init_cleanup(-1);
        }
    }

    // Push the initial task
    if(sched_spawn_core(f, closure, &sched, 0) < 0) {
        fprintf(stderr, "Can't create the initial task\n");
        return sched_init_cleanup(-1);
    }

    if(!(sched.threads = malloc(sched.nthreads * sizeof(pthread_t)))) {
        perror("Threads");
        return sched_init_cleanup(-1);
    }

    for(int i = 0; i < nthreads; ++i) {
        if(pthread_create(&sched.threads[i], NULL, sched_worker, &sched) != 0) {
            fprintf(stderr, "Can't create the thread %d", i);
            if(i > 0) {
                fprintf(stderr, ", cancelling already created threads...\n");
                for(int j = 0; j < i; ++j) {
                    if(pthread_cancel(sched.threads[j]) != 0) {
                        fprintf(stderr, "Can't cancel the thread %d\n", j);
                    }
                }
            } else {
                fprintf(stderr, "\n");
            }
            return sched_init_cleanup(-1);
        }
    }

    for(int i = 0; i < nthreads; ++i) {
        if(pthread_join(sched.threads[i], NULL) != 0) {
            fprintf(stderr, "Can't wait for the thread %d\n", i);
            return sched_init_cleanup(-1);
        }
    }

    return sched_init_cleanup(1);
}

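/*
 * Usage sketch (illustrative only, not part of this file): the taskfunc
 * typedef comes from ../includes/sched.h; judging from the call
 * f(closure, s) in sched_worker it is assumed here to be
 * void (*taskfunc)(void *, struct scheduler *).  A call such as
 * sched_init(0, 64, hello, NULL) would pick sched_default_threads()
 * workers with 64-slot stacks, run the initial task plus anything it
 * sched_spawn()s, join every worker and return 1 on success, -1 on error.
 *
 *     void hello(void *closure, struct scheduler *s)
 *     {
 *         (void)closure;
 *         (void)s;
 *         printf("hello from a task\n");
 *     }
 */
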
int
sched_init_cleanup(int ret_code)
{
    if(sched.mutex) {
        for(int i = 0; i < sched.nthreads; ++i) {
            pthread_mutex_destroy(&sched.mutex[i]);
        }
        free(sched.mutex);
        sched.mutex = NULL;
    }

    if(sched.tasks) {
        for(int i = 0; i < sched.nthreads; ++i) {
            if(sched.tasks[i]) {
                free(sched.tasks[i]);
                sched.tasks[i] = NULL;
            }
        }
        free(sched.tasks);
        sched.tasks = NULL;
    }

    if(sched.threads) {
        free(sched.threads);
        sched.threads = NULL;
    }

    if(sched.top) {
        free(sched.top);
        sched.top = NULL;
    }

    return ret_code;
}

int
sched_spawn(taskfunc f, void *closure, struct scheduler *s)
{
    int core;
    if((core = current_thread(s)) < 0) {
        fprintf(stderr, "Thread not in list, who am I?\n");
        return -1;
    }

    // Push the task onto the current thread's stack
    return sched_spawn_core(f, closure, s, core);
}

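/*
 * Note: sched_spawn only succeeds when called from one of the worker
 * threads created by sched_init, since current_thread() looks the caller
 * up in s->threads; the initial task is injected with
 * sched_spawn_core(..., 0) instead.
 */
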
2024-04-21 16:39:00 +02:00
int
2024-04-21 16:51:26 +02:00
current_thread(struct scheduler *s)
2024-04-21 16:39:00 +02:00
{
2024-04-21 16:51:26 +02:00
pthread_t current = pthread_self();
for(int i = 0; i < s->nthreads; i++) {
if(pthread_equal(s->threads[i], current)) {
return i;
}
}
return -1;
2024-04-21 15:38:06 +02:00
}
int
sched_spawn_core(taskfunc f, void *closure, struct scheduler *s, int core)
{
    pthread_mutex_lock(&s->mutex[core]);

    if(s->top[core] + 1 >= s->qlen) {
        pthread_mutex_unlock(&s->mutex[core]);
        errno = EAGAIN;
        fprintf(stderr, "Stack is full\n");
        return -1;
    }

    s->top[core]++;
    s->tasks[core][s->top[core]] = (struct task_info){closure, f};

    pthread_mutex_unlock(&s->mutex[core]);

    return 0;
}

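/*
 * Note: the push is LIFO (sched_worker pops from top[core]) and never
 * blocks: when the stack of `core` already holds qlen tasks the call
 * fails with errno set to EAGAIN.
 */
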
void *
sched_worker(void *arg)
{
    struct scheduler *s = (struct scheduler *)arg;

    // Index of the current thread in the scheduler's arrays
    int curr_th;
    if((curr_th = current_thread(s)) < 0) {
        fprintf(stderr, "Worker thread not tracked, exiting...\n");
        return NULL;
    }

    while(1) {
        pthread_mutex_lock(&s->mutex[curr_th]);

        // Nothing left to do on our own stack
        if(s->top[curr_th] == -1) {
            // Look for a thread to steal from (the one with the most pending tasks)
            int stolen = -1;
            /* for(int i = 0, size = -1; i < s->nthreads; ++i) {
                if(i == curr_th) {
                    // Don't steal from ourselves
                    continue;
                }

                pthread_mutex_lock(&s->mutex[i]);
                if(s->top[i] > size) {
                    stolen = i;
                    size = s->top[i];
                }
                pthread_mutex_unlock(&s->mutex[i]);
            } */

            // Steal a task from another thread
            if(stolen >= 0) {
                struct task_info theft;

                pthread_mutex_lock(&s->mutex[stolen]);
                // For now we take the oldest task by swapping the first
                // and the last entries
                // TODO: take the first task while preserving the order
                theft = s->tasks[stolen][0];
                s->tasks[stolen][0] = s->tasks[stolen][s->top[stolen]];
                s->top[stolen]--;
                pthread_mutex_unlock(&s->mutex[stolen]);

                pthread_mutex_unlock(&s->mutex[curr_th]);

                // Push the stolen task onto our own stack
                sched_spawn_core(theft.f, theft.closure, s, curr_th);
                continue;
            }

            pthread_mutex_unlock(&s->mutex[curr_th]);
            break;
        }

        // Pop the task from the top of the stack
        taskfunc f = s->tasks[curr_th][s->top[curr_th]].f;
        void *closure = s->tasks[curr_th][s->top[curr_th]].closure;
        s->top[curr_th]--;

        pthread_mutex_unlock(&s->mutex[curr_th]);

        // Run the task
        f(closure, s);
    }

    return NULL;
}
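
/*
 * Note on the current state of the worker loop: the victim-selection scan
 * above is commented out, so `stolen` stays at -1, no task is ever stolen
 * and a worker simply exits once its own stack is empty.  If the scan is
 * re-enabled as written, it would likely also need a lock-ordering
 * strategy, since it takes s->mutex[i] while still holding
 * s->mutex[curr_th].
 */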