MINOR: cpu-topo: make sure we don't leave unassigned IDs in the cpu_topo

It's important that we don't leave unassigned IDs in the topology,
because the selection mechanism is based on index-based masks, so an
unassigned ID will never be kept. This is particularly visible on
systems where we cannot access the CPU topology: the package id, node id
and even thread id are all set to -1, and every CPU ends up evicted because
-1 is never present in the "only-cpu" sets.

Here in new function "cpu_fixup_topology()", we assign them with the
smallest unassigned value. This function will be used to assign IDs
where missing in general.
This commit is contained in:
Willy Tarreau 2025-03-13 08:54:59 +01:00
parent af648c7b58
commit d169758fa9
3 changed files with 97 additions and 0 deletions

View File

@ -28,6 +28,9 @@ int cpu_detect_usable(void);
/* detect the CPU topology based on info in /sys */
int cpu_detect_topology(void);
/* fix missing info in the CPU topology */
void cpu_fixup_topology(void);
/* compose clusters */
void cpu_compose_clusters(void);

View File

@ -415,6 +415,97 @@ static int cpu_topo_get_maxcpus(void)
return abs_max;
}
/* Tiny accessors returning a pointer to one of the ID fields of entry <cpu>
 * in ha_cpu_topo[]. They let cpu_topo_fill_missing_id() operate on any of
 * the three ID fields without triplicating its logic.
 */
static int *cpu_topo_pk_id_ptr(int cpu) { return &ha_cpu_topo[cpu].pk_id; }
static int *cpu_topo_no_id_ptr(int cpu) { return &ha_cpu_topo[cpu].no_id; }
static int *cpu_topo_th_id_ptr(int cpu) { return &ha_cpu_topo[cpu].th_id; }

/* Assigns the smallest unused ID to every CPU whose ID, accessed via
 * <id_ptr>, is still -1. First a bitmap of all unassigned IDs is built so
 * that the lowest unassigned one can be spotted, then that value is stored
 * into each entry currently set to -1. Returns the ID that was picked (it
 * was not in use by any CPU beforehand), so the caller can detect which
 * entries were just fixed up and update related fields if needed.
 */
static int cpu_topo_fill_missing_id(int *(*id_ptr)(int cpu))
{
	struct hap_cpuset cpuset;
	int cpu;
	int min_id, neg;

	/* start with all possible IDs marked unused, then clear each ID
	 * found assigned to a CPU; count the unassigned entries in <neg>.
	 */
	ha_cpuset_zero(&cpuset);
	for (cpu = 0; cpu <= cpu_topo_lastcpu; cpu++)
		ha_cpuset_set(&cpuset, cpu);

	for (cpu = neg = 0; cpu <= cpu_topo_lastcpu; cpu++) {
		if (*id_ptr(cpu) < 0)
			neg++;
		else
			ha_cpuset_clr(&cpuset, *id_ptr(cpu));
	}

	/* get the first unused ID (ffs is 1-based, hence the -1) */
	min_id = ha_cpuset_ffs(&cpuset) - 1;
	for (cpu = 0; neg && cpu <= cpu_topo_lastcpu; cpu++) {
		if (*id_ptr(cpu) < 0) {
			*id_ptr(cpu) = min_id;
			neg--;
		}
	}
	return min_id;
}

/* This function is responsible for trying to fill in the missing info after
 * topology detection and making sure we don't leave any ID at -1, but rather
 * we assign unused ones.
 */
void cpu_fixup_topology(void)
{
	int min_id;
	int cpu;

	/* fill the package id, node id and thread_id where missing, each with
	 * the smallest currently unassigned value.
	 */
	cpu_topo_fill_missing_id(cpu_topo_pk_id_ptr);
	cpu_topo_fill_missing_id(cpu_topo_no_id_ptr);

	/* for the thread id we also need to update th_cnt on the entries that
	 * were just fixed up: <min_id> was unused by any CPU before the call,
	 * so a CPU carrying it now is necessarily one we just assigned.
	 */
	min_id = cpu_topo_fill_missing_id(cpu_topo_th_id_ptr);
	for (cpu = 0; cpu <= cpu_topo_lastcpu; cpu++) {
		if (ha_cpu_topo[cpu].th_id == min_id)
			ha_cpu_topo[cpu].th_cnt = min_id + 1;
	}

	/* assign capacity if not filled, based on the number of threads on the
	 * core: in a same package, SMT-capable cores are generally those
	 * optimized for performance while non-SMT ones are generally those
	 * optimized for efficiency. We'll reflect that by assigning 100 and 50
	 * respectively to those.
	 */
	for (cpu = 0; cpu <= cpu_topo_lastcpu; cpu++) {
		if (ha_cpu_topo[cpu].capa < 0)
			ha_cpu_topo[cpu].capa = (ha_cpu_topo[cpu].th_cnt > 1) ? 100 : 50;
	}
}
/* This function is responsible for composing clusters based on existing info
* on the CPU topology.
*/

View File

@ -2066,6 +2066,9 @@ static void step_init_2(int argc, char** argv)
/* Now detect how CPUs are arranged */
cpu_detect_topology();
/* fixup missing info */
cpu_fixup_topology();
/* compose clusters */
cpu_compose_clusters();
#endif