REORG: activity: uninline sched_activity_entry()

This one is expensive in code size because it pulls in xxhash.h at a low
dependency level that gets inherited in plenty of places, all for a
function that doesn't benefit from inlining and could possibly even
benefit from not being inlined, given that it's large and called from
the scheduler.

Moving it to activity.c reduces the LoC by 1.2% and the binary size by
~1kB.
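
For illustration only, here is a small standalone sketch of the lookup
scheme this function implements: the function pointer is folded down to
one of 256 slots, the slot is claimed atomically on first use, and a
colliding function falls back to slot 0 ("other"). It uses C11 atomics
and a trivial multiplicative hash instead of the HAProxy macros and
XXH64 folding, so it only models the technique and is not part of the
patch:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct entry {
        _Atomic(const void *) func;  /* owner of this slot, NULL if free */
        uint64_t calls;              /* per-function counter */
    };

    static struct entry table[256]; /* slot 0 doubles as "other" */

    /* Fold a pointer into an 8-bit index (simplified stand-in for the
     * XXH64-based folding used in the real code).
     */
    static unsigned idx_of(const void *func)
    {
        uint64_t h = (uintptr_t)func * 0x9E3779B97F4A7C15ULL;

        h ^= h >> 32; h ^= h >> 16; h ^= h >> 8;
        return h & 0xff;
    }

    /* Return the slot owned by <func>, claiming it on first use; on
     * collision return slot 0 instead.
     */
    static struct entry *entry_of(const void *func)
    {
        struct entry *e = &table[idx_of(func)];
        const void *old = NULL;

        if (atomic_load(&e->func) == func)
            return e;
        if (atomic_compare_exchange_strong(&e->func, &old, func))
            return e;
        return table;
    }

    static void dummy_task(void) { }

    int main(void)
    {
        entry_of((const void *)dummy_task)->calls++;
        printf("calls=%llu\n",
               (unsigned long long)entry_of((const void *)dummy_task)->calls);
        return 0;
    }
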
diff --git a/include/haproxy/activity.h b/include/haproxy/activity.h
index 421697c..42b0c87 100644
--- a/include/haproxy/activity.h
+++ b/include/haproxy/activity.h
@@ -24,8 +24,6 @@
 
 #include <haproxy/activity-t.h>
 #include <haproxy/api.h>
-#include <haproxy/freq_ctr.h>
-#include <haproxy/xxhash.h>
 
 extern unsigned int profiling;
 extern unsigned long task_profiling_mask;
@@ -34,32 +32,7 @@
 
 void report_stolen_time(uint64_t stolen);
 void activity_count_runtime();
-
-/* Computes the index of function pointer <func> for use with sched_activity[]
- * or any other similar array passed in <array>, and returns a pointer to the
- * entry after having atomically assigned it to this function pointer. Note
- * that in case of collision, the first entry is returned instead ("other").
- */
-static inline struct sched_activity *sched_activity_entry(struct sched_activity *array, const void *func)
-{
-	uint64_t hash = XXH64_avalanche(XXH64_mergeRound((size_t)func, (size_t)func));
-	struct sched_activity *ret;
-	const void *old = NULL;
-
-	hash ^= (hash >> 32);
-	hash ^= (hash >> 16);
-	hash ^= (hash >> 8);
-	hash &= 0xff;
-	ret = &array[hash];
-
-	if (likely(ret->func == func))
-		return ret;
-
-	if (HA_ATOMIC_CAS(&ret->func, &old, func))
-		return ret;
-
-	return array;
-}
+struct sched_activity *sched_activity_entry(struct sched_activity *array, const void *func);
 
 #endif /* _HAPROXY_ACTIVITY_H */
 
diff --git a/src/activity.c b/src/activity.c
index 90a1ef9..c3010c5 100644
--- a/src/activity.c
+++ b/src/activity.c
@@ -19,6 +19,7 @@
 #include <haproxy/stream_interface.h>
 #include <haproxy/time.h>
 #include <haproxy/tools.h>
+#include <haproxy/xxhash.h>
 
 #if defined(DEBUG_MEM_STATS)
 /* these ones are macros in bug.h when DEBUG_MEM_STATS is set, and will
@@ -558,6 +559,32 @@
 }
 #endif // USE_MEMORY_PROFILING
 
+/* Computes the index of function pointer <func> for use with sched_activity[]
+ * or any other similar array passed in <array>, and returns a pointer to the
+ * entry after having atomically assigned it to this function pointer. Note
+ * that in case of collision, the first entry is returned instead ("other").
+ */
+struct sched_activity *sched_activity_entry(struct sched_activity *array, const void *func)
+{
+	uint64_t hash = XXH64_avalanche(XXH64_mergeRound((size_t)func, (size_t)func));
+	struct sched_activity *ret;
+	const void *old = NULL;
+
+	hash ^= (hash >> 32);
+	hash ^= (hash >> 16);
+	hash ^= (hash >> 8);
+	hash &= 0xff;
+	ret = &array[hash];
+
+	if (likely(ret->func == func))
+		return ret;
+
+	if (HA_ATOMIC_CAS(&ret->func, &old, func))
+		return ret;
+
+	return array;
+}
+
 /* This function dumps all profiling settings. It returns 0 if the output
  * buffer is full and it needs to be called again, otherwise non-zero.
  * It dumps some parts depending on the following states: