MINOR: thread: turn thread_cpu_mask_forced() into an init-time variable

The function is not convenient because it doesn't allow us to undo the
startup changes, and depending on where it's being used, we don't know
whether the values read have already been altered (this is not the case
right now but it's going to evolve).

Let's just compute the status during cpu_detect_usable() and set a
variable accordingly. This way we'll always read the init value, and
if needed we can even afford to reset it. Also, placing it in cpu_topo.c
limits cross-file dependencies (e.g. for threads built without affinity support).
This commit is contained in:
Willy Tarreau 2025-03-12 10:40:49 +01:00
parent 3a7cc676fa
commit ac1db9db7d
5 changed files with 14 additions and 22 deletions

View File

@ -9,6 +9,9 @@ extern int cpu_topo_maxcpus;
extern int cpu_topo_lastcpu;
extern struct ha_cpu_topo *ha_cpu_topo;
/* non-zero if we're certain that taskset or similar was used to force CPUs */
extern int cpu_mask_forced;
/* Detects CPUs that are online on the system. It may rely on FS access (e.g.
* /sys on Linux). Returns the number of CPUs detected or 0 if the detection
* failed.

View File

@ -298,11 +298,6 @@ static inline unsigned long thread_isolated()
return _HA_ATOMIC_LOAD(&isolated_thread) == tid;
}
/* Returns 1 if the cpu set is currently restricted for the process else 0.
* Currently only implemented for the Linux platform.
*/
int thread_cpu_mask_forced(void);
#if !defined(DEBUG_THREAD) && !defined(DEBUG_FULL)
/* Thread debugging is DISABLED, these are the regular locking functions */

View File

@ -2922,7 +2922,7 @@ int check_config_validity()
{
int numa_cores = 0;
#if defined(USE_CPU_AFFINITY)
if (global.numa_cpu_mapping && !thread_cpu_mask_forced() && !cpu_map_configured())
if (global.numa_cpu_mapping && !cpu_mask_forced && !cpu_map_configured())
numa_cores = numa_detect_topology();
#endif
global.nbthread = numa_cores ? numa_cores :

View File

@ -17,6 +17,8 @@ int cpu_topo_lastcpu = -1; // last supposed online CPU (no need to look beyond
struct ha_cpu_topo *ha_cpu_topo = NULL;
struct cpu_map *cpu_map;
/* non-zero if we're certain that taskset or similar was used to force CPUs */
int cpu_mask_forced = 0;
/* Detects CPUs that are online on the system. It may rely on FS access (e.g.
* /sys on Linux). Returns the number of CPUs detected or 0 if the detection
@ -91,14 +93,19 @@ int cpu_detect_usable(void)
/* Update the list of currently offline CPUs. Normally it's a subset
* of the unbound ones, but we cannot infer anything if we don't have
* the info so we only update what we know.
* the info so we only update what we know. We take this opportunity
* for detecting that some online CPUs are not bound, indicating that
* taskset or equivalent was used.
*/
if (ha_cpuset_detect_online(&boot_set)) {
for (cpu = 0; cpu < cpu_topo_maxcpus; cpu++) {
if (!ha_cpuset_isset(&boot_set, cpu))
if (!ha_cpuset_isset(&boot_set, cpu)) {
ha_cpu_topo[cpu].st |= HA_CPU_F_OFFLINE;
else
} else {
cpu_topo_lastcpu = cpu;
if (ha_cpu_topo[cpu].st & HA_CPU_F_EXCLUDED)
cpu_mask_forced = 1;
}
}
}

View File

@ -390,19 +390,6 @@ static int thread_cpus_enabled()
return ret;
}
/* Returns 1 if the CPU set is currently restricted for the process, else 0.
 * Currently only implemented for the Linux platform: it compares the number
 * of CPUs reported online by the system (sysconf) against the number of CPUs
 * the process is actually allowed to run on; a mismatch indicates that
 * taskset or an equivalent mechanism restricted the process' affinity.
 * On non-Linux platforms it conservatively reports 0 (no restriction known).
 */
int thread_cpu_mask_forced(void)
{
#if defined(__linux__)
	/* total CPUs online system-wide, regardless of our affinity mask */
	const int cpus_avail = sysconf(_SC_NPROCESSORS_ONLN);

	/* fewer (or differing) usable CPUs than online ones means the
	 * process' CPU mask was forced externally.
	 */
	return cpus_avail != thread_cpus_enabled();
#else
	return 0;
#endif
}
/* Below come the lock-debugging functions */
#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)