BUG/MAJOR: fix build on musl with cpu_set_t support

Move the cpu_map structure out of the global struct and into a global
variable defined in the cpuset.c compilation unit. This allows the
includes to be reorganized without having to define _GNU_SOURCE
everywhere for cpu_set_t support.

This fixes the compilation with musl libc, most notably used by the
Alpine-based docker image.

This fixes GitHub issue #1235.

No backport is needed as this feature is new in the current 2.4-dev.
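
The underlying reason is that cpu_set_t and the CPU_*() macros are GNU
extensions: both glibc and musl only expose them from <sched.h> when
_GNU_SOURCE is defined before the first include. A minimal standalone
sketch of that constraint (illustrative only, not part of this patch):

    /* _GNU_SOURCE must come before any system include, otherwise
     * <sched.h> does not declare cpu_set_t, CPU_SET() or
     * sched_setaffinity() on glibc/musl.
     */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(0, &set);       /* allow CPU 0 only */
        if (sched_setaffinity(0, sizeof(set), &set) == 0)
            printf("pinned to %d CPU(s)\n", CPU_COUNT(&set));
        return 0;
    }
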
diff --git a/include/haproxy/cpuset-t.h b/include/haproxy/cpuset-t.h
index 6bc29af..36b4a51 100644
--- a/include/haproxy/cpuset-t.h
+++ b/include/haproxy/cpuset-t.h
@@ -12,6 +12,8 @@
 #endif
 #endif
 
+#include <haproxy/api-t.h>
+
 #if defined(__linux__) || defined(__DragonFly__)
 
 # define CPUSET_REPR cpu_set_t
@@ -37,4 +39,10 @@
 	CPUSET_REPR cpuset;
 };
 
+struct cpu_map {
+	struct hap_cpuset proc[MAX_PROCS];      /* list of CPU masks for the first 32/64 processes */
+	struct hap_cpuset proc_t1[MAX_PROCS];   /* list of CPU masks for the 1st thread of each process */
+	struct hap_cpuset thread[MAX_THREADS];  /* list of CPU masks for the first 32/64 threads of the 1st process */
+};
+
 #endif /* _HAPROXY_CPUSET_T_H */
diff --git a/include/haproxy/cpuset.h b/include/haproxy/cpuset.h
index 6e16271..d29c356 100644
--- a/include/haproxy/cpuset.h
+++ b/include/haproxy/cpuset.h
@@ -3,6 +3,8 @@
 
 #include <haproxy/cpuset-t.h>
 
+extern struct cpu_map cpu_map;
+
 /* Unset all indexes in <set>.
  */
 void ha_cpuset_zero(struct hap_cpuset *set);
diff --git a/include/haproxy/global-t.h b/include/haproxy/global-t.h
index f2cf5ce..bea97dd 100644
--- a/include/haproxy/global-t.h
+++ b/include/haproxy/global-t.h
@@ -24,9 +24,6 @@
 
 #include <haproxy/api-t.h>
 #include <haproxy/buf-t.h>
-#ifdef USE_CPU_AFFINITY
-#include <haproxy/cpuset-t.h>
-#endif
 #include <haproxy/freq_ctr-t.h>
 #include <haproxy/vars-t.h>
 
@@ -161,13 +158,6 @@
 		} ux;
 	} unix_bind;
 	struct proxy *cli_fe;           /* the frontend holding the stats settings */
-#ifdef USE_CPU_AFFINITY
-	struct {
-		struct hap_cpuset proc[MAX_PROCS];      /* list of CPU masks for the 32/64 first processes */
-		struct hap_cpuset proc_t1[MAX_PROCS];   /* list of CPU masks for the 1st thread of each process */
-		struct hap_cpuset thread[MAX_THREADS];  /* list of CPU masks for the 32/64 first threads of the 1st process */
-	} cpu_map;
-#endif
 	int numa_cpu_mapping;
 	/* The info above is config stuff, it doesn't change during the process' life */
 	/* A number of the elements below are updated by all threads in real time and
diff --git a/src/cfgparse-global.c b/src/cfgparse-global.c
index c653fb4..384ad3c 100644
--- a/src/cfgparse-global.c
+++ b/src/cfgparse-global.c
@@ -1,3 +1,4 @@
+#define _GNU_SOURCE  /* for cpu_set_t from haproxy/cpuset.h */
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -1109,12 +1110,12 @@
 					continue;
 
 				if (!autoinc)
-					ha_cpuset_assign(&global.cpu_map.proc[i], &cpus);
+					ha_cpuset_assign(&cpu_map.proc[i], &cpus);
 				else {
-					ha_cpuset_zero(&global.cpu_map.proc[i]);
+					ha_cpuset_zero(&cpu_map.proc[i]);
 					n = ha_cpuset_ffs(&cpus_copy) - 1;
 					ha_cpuset_clr(&cpus_copy, n);
-					ha_cpuset_set(&global.cpu_map.proc[i], n);
+					ha_cpuset_set(&cpu_map.proc[i], n);
 				}
 			}
 		} else {
@@ -1135,8 +1136,8 @@
 					/* For first process, thread[0] is used.
 					 * Use proc_t1[N] for all others
 					 */
-					dst = i ? &global.cpu_map.proc_t1[i] :
-					          &global.cpu_map.thread[0];
+					dst = i ? &cpu_map.proc_t1[i] :
+					          &cpu_map.thread[0];
 
 					if (!autoinc) {
 						ha_cpuset_assign(dst, &cpus);
@@ -1159,12 +1160,12 @@
 						continue;
 
 					if (!autoinc)
-						ha_cpuset_assign(&global.cpu_map.thread[j], &cpus);
+						ha_cpuset_assign(&cpu_map.thread[j], &cpus);
 					else {
-						ha_cpuset_zero(&global.cpu_map.thread[j]);
+						ha_cpuset_zero(&cpu_map.thread[j]);
 						n = ha_cpuset_ffs(&cpus_copy) - 1;
 						ha_cpuset_clr(&cpus_copy, n);
-						ha_cpuset_set(&global.cpu_map.thread[j], n);
+						ha_cpuset_set(&cpu_map.thread[j], n);
 					}
 				}
 			}
diff --git a/src/cpuset.c b/src/cpuset.c
index e4310b6..46e572d 100644
--- a/src/cpuset.c
+++ b/src/cpuset.c
@@ -5,6 +5,8 @@
 #include <haproxy/cpuset.h>
 #include <haproxy/intops.h>
 
+struct cpu_map cpu_map;
+
 void ha_cpuset_zero(struct hap_cpuset *set)
 {
 #if defined(CPUSET_USE_CPUSET) || defined(CPUSET_USE_FREEBSD_CPUSET)
diff --git a/src/haproxy.c b/src/haproxy.c
index cd0edcf..4dcf837 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -1584,9 +1584,9 @@
 	{
 		int i;
 		for (i = 0; i < MAX_PROCS; ++i) {
-			ha_cpuset_zero(&global.cpu_map.proc[i]);
-			ha_cpuset_zero(&global.cpu_map.proc_t1[i]);
-			ha_cpuset_zero(&global.cpu_map.thread[i]);
+			ha_cpuset_zero(&cpu_map.proc[i]);
+			ha_cpuset_zero(&cpu_map.proc_t1[i]);
+			ha_cpuset_zero(&cpu_map.thread[i]);
 		}
 	}
 #endif
@@ -2940,13 +2940,13 @@
 #ifdef USE_CPU_AFFINITY
 		if (proc < global.nbproc &&  /* child */
 		    proc < MAX_PROCS &&       /* only the first 32/64 processes may be pinned */
-		    ha_cpuset_count(&global.cpu_map.proc[proc])) {   /* only do this if the process has a CPU map */
+		    ha_cpuset_count(&cpu_map.proc[proc])) {   /* only do this if the process has a CPU map */
 
 #ifdef __FreeBSD__
-			struct hap_cpuset *set = &global.cpu_map.proc[proc];
+			struct hap_cpuset *set = &cpu_map.proc[proc];
 			ret = cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(set->cpuset), &set->cpuset);
 #elif defined(__linux__) || defined(__DragonFly__)
-			struct hap_cpuset *set = &global.cpu_map.proc[proc];
+			struct hap_cpuset *set = &cpu_map.proc[proc];
 			sched_setaffinity(0, sizeof(set->cpuset), &set->cpuset);
 #endif
 		}
@@ -3184,17 +3184,17 @@
 		/* If on multiprocess, use proc_t1 except for the first process.
 		 */
 		if ((relative_pid - 1) > 0)
-			global.cpu_map.thread[0] = global.cpu_map.proc_t1[relative_pid-1];
+			cpu_map.thread[0] = cpu_map.proc_t1[relative_pid-1];
 
 		for (i = 0; i < global.nbthread; i++) {
-			if (ha_cpuset_count(&global.cpu_map.proc[relative_pid-1]))
-				ha_cpuset_and(&global.cpu_map.thread[i], &global.cpu_map.proc[relative_pid-1]);
+			if (ha_cpuset_count(&cpu_map.proc[relative_pid-1]))
+				ha_cpuset_and(&cpu_map.thread[i], &cpu_map.proc[relative_pid-1]);
 
 			if (i < MAX_THREADS &&       /* only the first 32/64 threads may be pinned */
-			    ha_cpuset_count(&global.cpu_map.thread[i])) {/* only do this if the thread has a THREAD map */
+			    ha_cpuset_count(&cpu_map.thread[i])) {/* only do this if the thread has a THREAD map */
 #if defined(__APPLE__)
 				int j;
-				unsigned long cpu_map = global.cpu_map.thread[i].cpuset;
+				unsigned long set = cpu_map.thread[i].cpuset;
 
-				while ((j = ffsl(cpu_map)) > 0) {
+				while ((j = ffsl(set)) > 0) {
 					thread_affinity_policy_data_t cpu_set = { j - 1 };
@@ -3203,7 +3203,7 @@
-					cpu_map &= ~(1UL << (j - 1));
+					set &= ~(1UL << (j - 1));
 				}
 #else
-				struct hap_cpuset *set = &global.cpu_map.thread[i];
+				struct hap_cpuset *set = &cpu_map.thread[i];
 				pthread_setaffinity_np(ha_thread_info[i].pthread,
 				                       sizeof(set->cpuset), &set->cpuset);
 #endif
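
As a consumer-side illustration (a sketch with a hypothetical file name,
not additional code from this patch), a compilation unit now defines
_GNU_SOURCE itself and reaches the map through the extern declared in
haproxy/cpuset.h instead of going through the global struct:

    /* example_unit.c - hypothetical consumer of the relocated cpu_map */
    #define _GNU_SOURCE            /* before all includes, so <sched.h>
                                    * exposes cpu_set_t for haproxy/cpuset.h */
    #include <haproxy/cpuset.h>    /* extern struct cpu_map cpu_map; */

    static void reset_first_proc_mask(void)
    {
        /* previously: ha_cpuset_zero(&global.cpu_map.proc[0]); */
        ha_cpuset_zero(&cpu_map.proc[0]);
    }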