work-stealing-scheduler/src/sched.c

#include "../includes/sched.h"

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct task_info {
    void *closure;
    taskfunc f;
};

struct scheduler {
    /* Condition variables signalling state changes */
    pthread_cond_t *cond;

    /* Capacity of each stack */
    int qlen;

    /* Mutexes protecting the stacks */
    pthread_mutex_t *mutex;

    /* Number of threads created */
    int nthreads;

    /* Number of threads currently waiting */
    int nthsleep;

    /* Task stacks */
    struct task_info **tasks;

    /* Current top position of each stack */
    int *top;
};

/* Shared scheduler */
static struct scheduler sched;

/* Worker loop: pops tasks from the stack and runs them */
void *sched_worker(void *);

/* Cleans up everything set up by the scheduler initialisation */
int sched_init_cleanup(int);

int
sched_init(int nthreads, int qlen, taskfunc f, void *closure)
{
    sched.cond = NULL;
    sched.mutex = NULL;
    sched.tasks = NULL;
    sched.top = NULL;

    if(qlen <= 0) {
        fprintf(stderr, "qlen must be greater than 0\n");
        return -1;
    }
    sched.qlen = qlen;

    if(nthreads < 0) {
        fprintf(stderr, "nthreads must not be negative\n");
        return -1;
    } else if(nthreads == 0) {
        nthreads = sched_default_threads();
    }
    sched.nthreads = nthreads;
    sched.nthsleep = 0;

    // Initialize one mutex per thread
    if(!(sched.mutex = malloc(sched.nthreads * sizeof(pthread_mutex_t)))) {
        perror("Mutexes");
        return sched_init_cleanup(-1);
    }
    for(int i = 0; i < sched.nthreads; ++i) {
        if(pthread_mutex_init(&sched.mutex[i], NULL) != 0) {
            fprintf(stderr, "Can't init mutex for thread %d\n", i);
            return sched_init_cleanup(-1);
        }
    }

    // Initialize one condition variable per thread
    if(!(sched.cond = malloc(sched.nthreads * sizeof(pthread_cond_t)))) {
        perror("Condition variables");
        return sched_init_cleanup(-1);
    }
    for(int i = 0; i < sched.nthreads; ++i) {
        if(pthread_cond_init(&sched.cond[i], NULL) != 0) {
            fprintf(stderr, "Can't init condition variable for thread %d\n", i);
            return sched_init_cleanup(-1);
        }
    }

    // Initialize each thread's stack-top cursor (-1 means the stack is empty)
    if(!(sched.top = malloc(sched.nthreads * sizeof(int)))) {
        perror("Cursor top stack");
        return sched_init_cleanup(-1);
    }
    for(int i = 0; i < sched.nthreads; ++i) {
        sched.top[i] = -1;
    }

    // Allocate each thread's task stack
    if(!(sched.tasks = malloc(sched.nthreads * sizeof(struct task_info *)))) {
        perror("Stack list");
        return sched_init_cleanup(-1);
    }
    for(int i = 0; i < sched.nthreads; ++i) {
        if(!(sched.tasks[i] = malloc(qlen * sizeof(struct task_info)))) {
            fprintf(stderr, "Stack for thread %d: %s\n", i, strerror(errno));
            return sched_init_cleanup(-1);
        }
    }

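    // Create the worker threads; each one receives a pointer to the shared scheduler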
    pthread_t threads[nthreads];
    for(int i = 0; i < nthreads; ++i) {
        if(pthread_create(&threads[i], NULL, sched_worker, &sched) != 0) {
            fprintf(stderr, "Can't create thread %d", i);
            if(i > 0) {
                fprintf(stderr, ", cancelling already created threads...\n");
                for(int j = 0; j < i; ++j) {
                    if(pthread_cancel(threads[j]) != 0) {
                        fprintf(stderr, "Can't cancel thread %d\n", j);
                    }
                }
            } else {
                fprintf(stderr, "\n");
            }
            return sched_init_cleanup(-1);
        }
    }

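    // Push the initial task so the workers have something to run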
    if(sched_spawn(f, closure, &sched) < 0) {
        fprintf(stderr, "Can't create the initial task\n");
        return sched_init_cleanup(-1);
    }

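    // Wait for every worker thread to finish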
    for(int i = 0; i < nthreads; ++i) {
        if(pthread_join(threads[i], NULL) != 0) {
            fprintf(stderr, "Can't join thread %d\n", i);
            return sched_init_cleanup(-1);
        }
    }

    return sched_init_cleanup(1);
}

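/*
 * Releases everything allocated by sched_init and returns ret_code, so
 * error paths can simply `return sched_init_cleanup(-1);`.
 */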
int
sched_init_cleanup(int ret_code)
{
    if(sched.mutex) {
        for(int i = 0; i < sched.nthreads; ++i) {
            pthread_mutex_destroy(&sched.mutex[i]);
        }
        free(sched.mutex);
        sched.mutex = NULL;
    }

    if(sched.cond) {
        for(int i = 0; i < sched.nthreads; ++i) {
            pthread_cond_destroy(&sched.cond[i]);
        }
        free(sched.cond);
        sched.cond = NULL;
    }

    if(sched.tasks) {
        for(int i = 0; i < sched.nthreads; ++i) {
            if(sched.tasks[i]) {
                free(sched.tasks[i]);
                sched.tasks[i] = NULL;
            }
        }
        free(sched.tasks);
        sched.tasks = NULL;
    }

    if(sched.top) {
        free(sched.top);
        sched.top = NULL;
    }

    return ret_code;
}

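/*
 * Pushes a new task onto the shared stack (index 0) and wakes up one
 * waiting worker.  Sets errno to EAGAIN and returns -1 when the stack
 * is already full.
 */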
int
sched_spawn(taskfunc f, void *closure, struct scheduler *s)
{
    pthread_mutex_lock(&s->mutex[0]);

    if(s->top[0] + 1 >= s->qlen) {
        pthread_mutex_unlock(&s->mutex[0]);
        errno = EAGAIN;
        fprintf(stderr, "Stack is full\n");
        return -1;
    }

    s->tasks[0][++s->top[0]] = (struct task_info){closure, f};

    pthread_cond_signal(&s->cond[0]);
    pthread_mutex_unlock(&s->mutex[0]);

    return 0;
}

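/*
 * Body of each worker thread: repeatedly pops a task from the shared stack
 * and runs it.  The loop terminates once every worker is idle and the stack
 * is empty, which is detected when nthsleep reaches nthreads.
 */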
void *
sched_worker(void *arg)
{
    struct scheduler *s = (struct scheduler *)arg;

    while(1) {
        pthread_mutex_lock(&s->mutex[0]);

        // Nothing to do
        if(s->top[0] == -1) {
            s->nthsleep++;

            if(s->nthsleep == s->nthreads) {
                // Tell every waiting thread that there is no work left
                pthread_cond_broadcast(&s->cond[0]);
                pthread_mutex_unlock(&s->mutex[0]);
                break;
            }

            pthread_cond_wait(&s->cond[0], &s->mutex[0]);
            s->nthsleep--;
            pthread_mutex_unlock(&s->mutex[0]);
            continue;
        }

        // Pop the task from the top of the stack
        taskfunc f = s->tasks[0][s->top[0]].f;
        void *closure = s->tasks[0][s->top[0]].closure;
        s->top[0]--;

        pthread_mutex_unlock(&s->mutex[0]);

        // Run the task
        f(closure, s);
    }

    return NULL;
}