/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015 Google, Inc
 *
 * Taken from coreboot file of the same name
 */

#ifndef _X86_MP_H_
#define _X86_MP_H_

#include <asm/atomic.h>

typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);

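/*
 * Illustrative sketch only (not part of this header): any function with the
 * mp_callback_t signature can serve as a flight-record callback. The helper
 * below, including its name, is a hypothetical example:
 *
 *	static int report_ap(struct udevice *cpu, void *arg)
 *	{
 *		printf("AP %s entered the flight record\n", cpu->name);
 *
 *		return 0;
 *	}
 */
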
/*
 * An mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP to coordinate sequencing. Each flight record either
 * provides a barrier for each AP before calling the callback, or allows
 * the APs to perform the callback without waiting. Regardless, each AP
 * that reaches a record increments that record's cpus_entered field. When
 * the BSP observes that cpus_entered matches the number of APs, bsp_call
 * is called with bsp_arg and, upon returning, the barrier is released,
 * allowing the APs to make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * callback is simply not called.
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	mp_callback_t ap_call;
	void *ap_arg;
	mp_callback_t bsp_call;
	void *bsp_arg;
} __attribute__((aligned(ARCH_DMA_MINALIGN)));

#define MP_FLIGHT_RECORD(barrier_, ap_func_, ap_arg_, bsp_func_, bsp_arg_) \
	{							\
		.barrier = ATOMIC_INIT(barrier_),		\
		.cpus_entered = ATOMIC_INIT(0),			\
		.ap_call = ap_func_,				\
		.ap_arg = ap_arg_,				\
		.bsp_call = bsp_func_,				\
		.bsp_arg = bsp_arg_,				\
	}

#define MP_FR_BLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(0, ap_func, ap_arg, bsp_func, bsp_arg)

#define MP_FR_NOBLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(1, ap_func, ap_arg, bsp_func, bsp_arg)

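/*
 * Illustrative sketch only: a caller describes the bring-up sequence as an
 * array of flight records built with the macros above and hands it to
 * mp_init() through struct mp_params (below). Callback names other than
 * mp_init_cpu() (declared later in this header) are hypothetical:
 *
 *	static struct mp_flight_record mp_steps[] = {
 *		MP_FR_BLOCK_APS(mp_init_cpu, NULL, mp_init_cpu, NULL),
 *		MP_FR_NOBLOCK_APS(park_ap, NULL, NULL, NULL),
 *	};
 */
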
/*
 * The mp_params structure provides the arguments to the MP subsystem
 * for bringing up APs.
 *
 * At present this is overkill for U-Boot, but it may make it easier to add
 * SMM support.
 */
struct mp_params {
	/* Allow APs to load microcode in parallel if non-zero */
	int parallel_microcode_load;
	/* Pointer to the microcode update blob */
	const void *microcode_pointer;
	/* Flight plan for APs and BSP */
	struct mp_flight_record *flight_plan;
	/* Number of records in flight_plan */
	int num_records;
};

/*
 * mp_init() will set up the SIPI vector and bring up the APs according to
 * mp_params. Each flight record will be executed according to the plan. Note
 * that the MP infrastructure uses the SMM default area without saving it.
 * It is up to the chipset or mainboard to either reserve this area via e820
 * or to save the region prior to calling mp_init() and restore it after
 * mp_init() returns.
 *
 * At the time mp_init() is called, the BSP's MTRR MSRs are mirrored into the
 * APs, then caching is enabled, before the flight plan is run.
 *
 * The MP init has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of cpu numbers and APIC ids is not deterministic.
 *    Therefore, one cannot rely on this ordering, or on the order of devices
 *    in the device tree, unless the chipset or mainboard knows the APIC ids
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
int mp_init(struct mp_params *params);
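/*
 * Illustrative sketch only: a minimal call sequence, assuming the
 * hypothetical mp_steps[] flight plan shown earlier and eliding error
 * handling beyond the final check:
 *
 *	struct mp_params mp_params;
 *	int ret;
 *
 *	mp_params.parallel_microcode_load = 0;
 *	mp_params.microcode_pointer = NULL;
 *	mp_params.flight_plan = &mp_steps[0];
 *	mp_params.num_records = ARRAY_SIZE(mp_steps);
 *
 *	ret = mp_init(&mp_params);
 *	if (ret)
 *		printf("MP init failed: %d\n", ret);
 */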

/* Probes the CPU device */
int mp_init_cpu(struct udevice *cpu, void *unused);

/* Set up additional CPUs */
int x86_mp_init(void);

#endif /* _X86_MP_H_ */