MINOR: task: move the niced_tasks counter to the thread group context

This one is only used as a hint to improve scheduling latency, so there
is no more point in keeping it global since each thread group handles
its own run queue.
Willy Tarreau 2022-07-07 15:25:40 +02:00
parent b0e7712fb2
commit 91a7c164b4
3 changed files with 5 additions and 7 deletions
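For readers skimming the diff, the whole change boils down to the sketch
below: a minimal, hypothetical C11 model (simplified names, plain
<stdatomic.h> instead of HAProxy's _HA_ATOMIC_* wrappers and struct
tgroup_ctx) of a niced-task hint that lives in each thread group's
context rather than in one process-wide global:

    #include <stdatomic.h>

    /* stripped-down, hypothetical stand-in for the group context */
    struct tgroup_ctx_model {
            atomic_uint niced_tasks;  /* niced tasks queued in this group only */
    };

    /* on wakeup of a niced task: bump the owning group's hint */
    static inline void nice_hint_inc(struct tgroup_ctx_model *tg)
    {
            atomic_fetch_add_explicit(&tg->niced_tasks, 1, memory_order_relaxed);
    }

    /* on dequeue of a niced task: release the hint */
    static inline void nice_hint_dec(struct tgroup_ctx_model *tg)
    {
            atomic_fetch_sub_explicit(&tg->niced_tasks, 1, memory_order_relaxed);
    }

Since the counter is only a latency hint (it merely decides whether a
scheduler pass shrinks its batch, see the last two hunks), relaxed
atomics and occasional staleness are acceptable; correctness never
depends on its exact value.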

include/haproxy/task.h

@@ -88,8 +88,6 @@
 
 /* a few exported variables */
-extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
-
 extern struct pool_head *pool_head_task;
 extern struct pool_head *pool_head_tasklet;
 extern struct pool_head *pool_head_notification;
 

include/haproxy/tinfo-t.h

@@ -73,6 +73,8 @@ struct tgroup_ctx {
 	HA_RWLOCK_T wq_lock;     /* RW lock related to the wait queue below */
 	struct eb_root timers;   /* wait queue (sorted timers tree, global, accessed under wq_lock) */
 
+	uint niced_tasks;        /* number of niced tasks in this group's run queues */
+
 	/* pad to cache line (64B) */
 	char __pad[0];           /* unused except to check remaining room */
 	char __end[0] __attribute__((aligned(64)));
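The placement is deliberate: struct tgroup_ctx is padded out to 64-byte
cache lines, and the __pad[0]/__end[0] pair exists precisely to expose
the remaining room, so as long as at least four bytes of pad are left,
the 4-byte counter slots in without growing the structure. A
hypothetical illustration of how that remaining room can be measured
(the helper is not part of this diff; it assumes the header defining
struct tgroup_ctx is included):

    #include <stddef.h>

    /* __end[0] is aligned(64) and __pad[0] marks the end of the real
     * fields, so their offset difference is the spare room left in the
     * current cache line (0..63 bytes) */
    enum { TGROUP_CTX_PAD_LEFT = offsetof(struct tgroup_ctx, __end)
                               - offsetof(struct tgroup_ctx, __pad) };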

src/task.c

@@ -34,8 +34,6 @@ DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
  */
 DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));
 
-unsigned int niced_tasks = 0;  /* number of niced tasks in the run queue */
-
 
 /* Flags the task <t> for immediate destruction and puts it into its first
  * thread's shared tasklet list if not yet queued/running. This will bypass
@@ -229,7 +227,7 @@ void __task_wakeup(struct task *t)
 	if (likely(t->nice)) {
 		int offset;
 
-		_HA_ATOMIC_INC(&niced_tasks);
+		_HA_ATOMIC_INC(&tg_ctx->niced_tasks);
 		offset = t->nice * (int)global.tune.runqueue_depth;
 		t->rq.key += offset;
 	}
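The unchanged neighbouring lines show how the same nice value that feeds
the counter also biases the task's queue key, scaled by the run-queue
depth. A worked example with hypothetical numbers (200 is
tune.runqueue-depth's documented default, an assumption rather than
something this diff shows):

    /* illustration only, hypothetical values */
    int nice   = -10;           /* favoured task                      */
    int depth  = 200;           /* global.tune.runqueue_depth default */
    int offset = nice * depth;  /* -2000: sorts ~2000 keys earlier;   */
                                /* nice = +10 would delay it by 2000  */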
@@ -736,7 +734,7 @@ void process_runnable_tasks()
 
 	max_processed = global.tune.runqueue_depth;
 
-	if (likely(niced_tasks))
+	if (likely(tg_ctx->niced_tasks))
 		max_processed = (max_processed + 3) / 4;
 
 	if (max_processed < th_ctx->rq_total && th_ctx->rq_total <= 2*max_processed) {
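This is where the hint is consumed. The quartering is a ceiling
division, so a non-zero budget never collapses to zero:

    /* (x + 3) / 4 rounds up: 200 -> 50, 5 -> 2, 1 -> 1 */
    max_processed = (max_processed + 3) / 4;

With at least one niced task in the group, each pass therefore handles
at most a quarter of the usual batch and returns to the re-sorted queue
more often, which is the scheduling-latency improvement the commit
message refers to.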
@@ -849,7 +847,7 @@
 		}
 #endif
 		if (t->nice)
-			_HA_ATOMIC_DEC(&niced_tasks);
+			_HA_ATOMIC_DEC(&tg_ctx->niced_tasks);
 
 		/* Add it to the local task list */
 		LIST_APPEND(&tt->tasklets[TL_NORMAL], &((struct tasklet *)t)->list);
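This decrement mirrors the increment in __task_wakeup(), so the counter
tracks exactly the niced tasks currently sitting in the group's run
queues; and since both sides now go through tg_ctx, the hint never leaks
across thread-group boundaries, which is the whole point of dropping the
global.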