x86: Drop unnecessary mp_init logic
Now that sequence numbers are set up when devices are bound, this code is
not needed. Also, we should use dev_seq() instead of req_seq. Update the
whole file accordingly.
Also fix up the APL CPU code while we are here.
Signed-off-by: Simon Glass <sjg@chromium.org>
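---

For reference, a minimal sketch (not part of this patch) of how code now obtains a CPU's number, assuming only the existing driver-model accessors dev_seq() and uclass_id_foreach_dev(); the helper name show_cpu_seqs() is made up for illustration:

#include <common.h>
#include <dm.h>

/* Print the bind-time sequence number of each CPU device (illustration only) */
static void show_cpu_seqs(void)
{
	struct udevice *cpu;
	struct uclass *uc;

	/* dev_seq() returns the sequence number assigned when the device was bound */
	uclass_id_foreach_dev(UCLASS_CPU, cpu, uc)
		printf("cpu %s: seq %d\n", cpu->name, dev_seq(cpu));
}

With sequence numbers assigned at bind time there is no need to seed req_seq from the device tree before probing, which is what the loop removed below used to do.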
diff --git a/arch/x86/cpu/mp_init.c b/arch/x86/cpu/mp_init.c
index a0e1121..0e61c7b 100644
--- a/arch/x86/cpu/mp_init.c
+++ b/arch/x86/cpu/mp_init.c
@@ -87,7 +87,7 @@
* intel,apic-id = <2>;
* };
*
- * Here the 'reg' property is the CPU number and then is placed in dev->req_seq
+ * Here the 'reg' property is the CPU number and then is placed in dev_seq(cpu)
* so that we can index into ap_callbacks[] using that. The APIC ID is different
* and may not be sequential (it typically is if hyperthreading is supported).
*
@@ -135,7 +135,7 @@
*
* @func: Function to run
* @arg: Argument to pass to the function
- * @logical_cpu_number: Either a CPU number (i.e. dev->req_seq) or a special
+ * @logical_cpu_number: Either a CPU number (i.e. dev_seq(cpu)) or a special
* value like MP_SELECT_BSP. It tells the AP whether it should process this
* callback
*/
@@ -152,7 +152,7 @@
* ap_callbacks - Callback mailbox array
*
* Array of callback, one entry for each available CPU, indexed by the CPU
- * number, which is dev->req_seq. The entry for the main CPU is never used.
+ * number, which is dev_seq(cpu). The entry for the main CPU is never used.
* When this is NULL, there is no pending work for the CPU to run. When
* non-NULL it points to the mp_callback structure. This is shared between all
* CPUs, so should only be written by the main CPU.
@@ -562,7 +562,7 @@
if (cpu_countp)
*cpu_countp = ret;
- return dev->req_seq >= 0 ? dev->req_seq : 0;
+ return dev_seq(dev) >= 0 ? dev_seq(dev) : 0;
}
/**
@@ -614,7 +614,7 @@
static int run_ap_work(struct mp_callback *callback, struct udevice *bsp,
int num_cpus, uint expire_ms)
{
- int cur_cpu = bsp->req_seq;
+ int cur_cpu = dev_seq(bsp);
int num_aps = num_cpus - 1; /* number of non-BSPs to get this message */
int cpus_accepted;
ulong start;
@@ -679,7 +679,7 @@
if (!IS_ENABLED(CONFIG_SMP_AP_WORK))
return 0;
- per_cpu_slot = &ap_callbacks[cpu->req_seq];
+ per_cpu_slot = &ap_callbacks[dev_seq(cpu)];
while (1) {
struct mp_callback *cb = read_callback(per_cpu_slot);
@@ -694,7 +694,7 @@
mfence();
if (lcb.logical_cpu_number == MP_SELECT_ALL ||
lcb.logical_cpu_number == MP_SELECT_APS ||
- cpu->req_seq == lcb.logical_cpu_number)
+ dev_seq(cpu) == lcb.logical_cpu_number)
lcb.func(lcb.arg);
/* Indicate we are finished */
@@ -839,7 +839,6 @@
int num_aps, num_cpus;
atomic_t *ap_count;
struct udevice *cpu;
- struct uclass *uc;
int ret;
if (IS_ENABLED(CONFIG_QFW)) {
@@ -848,14 +847,6 @@
return ret;
}
- /*
- * Multiple APs are brought up simultaneously and they may get the same
- * seq num in the uclass_resolve_seq() during device_probe(). To avoid
- * this, set req_seq to the reg number in the device tree in advance.
- */
- uclass_id_foreach_dev(UCLASS_CPU, cpu, uc)
- cpu->req_seq = dev_read_u32_default(cpu, "reg", -1);
-
ret = get_bsp(&cpu, &num_cpus);
if (ret < 0) {
debug("Cannot init boot CPU: err=%d\n", ret);