MINOR: thread: get rid of MAX_THREADS_MASK
This macro was used both for binding and for lookups. When binding tasks or FDs, all_threads_mask is the better choice since it will later become per-group. For lookups, ~0UL always does the job. In practice the macro was thus barely needed anymore, since the rest of the code runs fine with a constant of all ones there.
commit 3ccb14d60d
parent e35f03239d
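
As a quick illustration of the reasoning above, here is a minimal standalone C sketch (not part of the patch) of the mask arithmetic behind the change: with MAX_THREADS equal to LONGBITS, the removed MAX_THREADS_MASK already evaluates to all ones, so ~0UL is a drop-in for lookups, while bindings switch to the run-time all_threads_mask. The LONGBITS definition and the example mask values are assumptions made for the sketch only.

/* Illustrative sketch only -- not from the patch. LONGBITS is assumed to be
 * the number of bits in a long; the mask values below are made up.
 */
#include <stdio.h>

#define LONGBITS         ((unsigned int)sizeof(long) * 8)    /* assumption */
#define MAX_THREADS      LONGBITS
#define MAX_THREADS_MASK (~0UL >> (LONGBITS - MAX_THREADS))  /* removed by this patch */

int main(void)
{
	unsigned long all_threads_mask = 0x0fUL;  /* e.g. 4 running threads (made up) */
	unsigned long task_mask        = 0x04UL;  /* element bound to thread 3 (made up) */

	/* With MAX_THREADS == LONGBITS the old macro is already all ones */
	printf("MAX_THREADS_MASK = %#lx\n", MAX_THREADS_MASK);
	printf("~0UL             = %#lx\n", ~0UL);

	/* Lookups only test bits that elements can actually have set, so a
	 * constant of all ones selects exactly the same elements.
	 */
	printf("old lookup match: %d\n", (task_mask & MAX_THREADS_MASK) != 0);
	printf("new lookup match: %d\n", (task_mask & ~0UL) != 0);

	/* Bindings now use the run-time all_threads_mask, which can later be
	 * narrowed to a single thread group.
	 */
	printf("bound threads: %#lx\n", task_mask & all_threads_mask);
	return 0;
}

On an LP64 target both lookup tests print the same result, which is the property the patch relies on for the ~0UL substitutions below.
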
@@ -536,7 +536,7 @@ broadcast it to any group.
 
 Right now in the code we have:
   - 18 calls of task_new(tid_bit)
-  - 18 calls of task_new(MAX_THREADS_MASK)
+  - 17 calls of task_new_anywhere()
   - 2 calls with a single bit
 
 Thus it looks like "task_new_anywhere()", "task_new_on()" and
@@ -58,7 +58,7 @@ static inline struct appctx *appctx_new_here(struct applet *applet, struct sedes
 
 static inline struct appctx *appctx_new_anywhere(struct applet *applet, struct sedesc *sedesc)
 {
-	return appctx_new(applet, sedesc, MAX_THREADS_MASK);
+	return appctx_new(applet, sedesc, all_threads_mask);
 }
 
 /* Helper function to call .init applet callback function, if it exists. Returns 0
@@ -29,7 +29,6 @@
 #ifndef USE_THREAD
 /* threads disabled, 1 thread max, 1 group max (note: group ids start at 1) */
 #define MAX_THREADS 1
-#define MAX_THREADS_MASK 1
 
 #define MAX_TGROUPS 1
 #define MAX_THREADS_PER_GROUP 1
@@ -39,7 +38,6 @@
 #ifndef MAX_THREADS
 #define MAX_THREADS LONGBITS
 #endif
-#define MAX_THREADS_MASK (~0UL >> (LONGBITS - MAX_THREADS))
 
 /* still limited to 1 group for now by default (note: group ids start at 1) */
 #ifndef MAX_TGROUPS
@@ -581,7 +581,7 @@ static inline struct task *task_new_here()
  */
 static inline struct task *task_new_anywhere()
 {
-	return task_new(MAX_THREADS_MASK);
+	return task_new(all_threads_mask);
 }
 
 /*
@@ -1217,7 +1217,7 @@ struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
 		if (LIST_INLIST(&check->buf_wait.list))
 			LIST_DEL_INIT(&check->buf_wait.list);
 
-		task_set_affinity(t, MAX_THREADS_MASK);
+		task_set_affinity(t, all_threads_mask);
 		check_release_buf(check, &check->bi);
 		check_release_buf(check, &check->bo);
 		check->state &= ~(CHK_ST_INPROGRESS|CHK_ST_IN_ALLOC|CHK_ST_OUT_ALLOC);
@@ -74,7 +74,7 @@ static int dns_connect_nameserver(struct dns_nameserver *ns)
 
 		/* Add the fd in the fd list and update its parameters */
 		dgram->t.sock.fd = fd;
-		fd_insert(fd, dgram, dgram_fd_handler, MAX_THREADS_MASK);
+		fd_insert(fd, dgram, dgram_fd_handler, all_threads_mask);
 		fd_want_recv(fd);
 		return 0;
 	}
@@ -902,10 +902,10 @@ void mworker_cleantasks()
 
 #ifdef USE_THREAD
 	/* cleanup the global run queue */
-	tmp_rq = eb32sc_first(&rqueue, MAX_THREADS_MASK);
+	tmp_rq = eb32sc_first(&rqueue, ~0UL);
 	while (tmp_rq) {
 		t = eb32sc_entry(tmp_rq, struct task, rq);
-		tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
+		tmp_rq = eb32sc_next(tmp_rq, ~0UL);
 		task_destroy(t);
 	}
 	/* cleanup the timers queue */
@@ -918,10 +918,10 @@ void mworker_cleantasks()
 #endif
 	/* clean the per thread run queue */
 	for (i = 0; i < global.nbthread; i++) {
-		tmp_rq = eb32sc_first(&ha_thread_ctx[i].rqueue, MAX_THREADS_MASK);
+		tmp_rq = eb32sc_first(&ha_thread_ctx[i].rqueue, ~0UL);
 		while (tmp_rq) {
 			t = eb32sc_entry(tmp_rq, struct task, rq);
-			tmp_rq = eb32sc_next(tmp_rq, MAX_THREADS_MASK);
+			tmp_rq = eb32sc_next(tmp_rq, ~0UL);
 			task_destroy(t);
 		}
 		/* cleanup the per thread timers queue */