/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015 Google, Inc
 *
 * Taken from coreboot file of the same name
 */

#ifndef _X86_MP_H_
#define _X86_MP_H_

#include <asm/atomic.h>
#include <asm/cache.h>
#include <linux/bitops.h>

struct udevice;

enum {
	/*
	 * Indicates that the function should run on all CPUs. We use a large
	 * number, above the number of real CPUs we expect to find.
	 */
	MP_SELECT_ALL = BIT(16),

	/* Run on the boot CPU (BSP) */
	MP_SELECT_BSP,

	/* Run on the non-boot CPUs (APs) */
	MP_SELECT_APS,
};

typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);

/*
 * An mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP to coordinate sequencing. Each flight record either
 * provides a barrier for each AP before calling the callback, or allows the
 * APs to perform the callback without waiting. Regardless, each AP
 * increments the cpus_entered field of the record it is running. When the
 * BSP observes that cpus_entered matches the number of APs, bsp_call is
 * called with bsp_arg and, upon returning, releases the barrier, allowing
 * the APs to make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * callback will just not be called.
 *
 * @barrier: Ensures that the BSP and APs don't run the flight record at the
 *	same time
 * @cpus_entered: Counts the number of APs that have run this record
 * @ap_call: Function for the APs to call
 * @ap_arg: Argument to pass to @ap_call
 * @bsp_call: Function for the BSP to call
 * @bsp_arg: Argument to pass to @bsp_call
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	mp_callback_t ap_call;
	void *ap_arg;
	mp_callback_t bsp_call;
	void *bsp_arg;
} __attribute__((aligned(ARCH_DMA_MINALIGN)));

#define MP_FLIGHT_RECORD(barrier_, ap_func_, ap_arg_, bsp_func_, bsp_arg_) \
	{							\
		.barrier = ATOMIC_INIT(barrier_),		\
		.cpus_entered = ATOMIC_INIT(0),			\
		.ap_call = ap_func_,				\
		.ap_arg = ap_arg_,				\
		.bsp_call = bsp_func_,				\
		.bsp_arg = bsp_arg_,				\
	}

#define MP_FR_BLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(0, ap_func, ap_arg, bsp_func, bsp_arg)

#define MP_FR_NOBLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(1, ap_func, ap_arg, bsp_func, bsp_arg)
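
/*
 * Example: a minimal sketch of how a flight plan might be built from these
 * macros. The callbacks ap_load_mtrrs() and bsp_collect_info() are
 * hypothetical and only show the shape of an mp_callback_t; the real plan
 * executed by mp_init() is defined in the MP init implementation.
 *
 *	static struct mp_flight_record mp_steps[] = {
 *		// APs run ap_load_mtrrs() without waiting for the BSP
 *		MP_FR_NOBLOCK_APS(ap_load_mtrrs, NULL, NULL, NULL),
 *		// APs wait at the barrier; once they have all arrived the
 *		// BSP runs bsp_collect_info(), then releases them
 *		MP_FR_BLOCK_APS(NULL, NULL, bsp_collect_info, NULL),
 *	};
 */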

/*
 * mp_init() will set up the SIPI vector and bring up the APs. Each flight
 * record will be executed according to the plan. Note that the MP
 * infrastructure uses the SMM default area without saving it. It is up to
 * the chipset or mainboard to either reserve this area in the e820 map or
 * save it prior to calling mp_init() and restore it after mp_init()
 * returns.
 *
 * At the time mp_init() is called the MTRR MSRs are mirrored into the APs,
 * then caching is enabled, before running the flight plan.
 *
 * The MP init has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of CPU number and APIC ids is not deterministic.
 *    Therefore, one cannot rely on this property or the order of devices in
 *    the device tree unless the chipset or mainboard knows the APIC ids
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
int mp_init(void);

/**
 * x86_mp_init() - Set up additional CPUs
 *
 * @return < 0 on error, 0 on success
 */
int x86_mp_init(void);

/**
 * mp_run_func() - Function to call on the AP
 *
 * @arg: Argument to pass
 */
typedef void (*mp_run_func)(void *arg);

#if CONFIG_IS_ENABLED(SMP) && !CONFIG_IS_ENABLED(X86_64)
/**
 * mp_run_on_cpus() - Run a function on one or all CPUs
 *
 * This does not return until all CPUs have completed the work
 *
 * Running on anything other than the boot CPU is only supported if
 * CONFIG_SMP_AP_WORK is enabled
 *
 * @cpu_select: CPU to run on (its dev_seq() value), or MP_SELECT_ALL for
 *	all, or MP_SELECT_BSP for BSP
 * @func: Function to run
 * @arg: Argument to pass to the function
 * @return 0 on success, -ve on error
 */
int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg);
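
/*
 * Example: a minimal sketch of running work on every CPU, assuming
 * CONFIG_SMP_AP_WORK is enabled. do_enable_feature() is hypothetical; any
 * function matching mp_run_func can be used.
 *
 *	static void do_enable_feature(void *arg)
 *	{
 *		// each selected CPU executes this, e.g. to write an MSR;
 *		// @arg is the value passed to mp_run_on_cpus()
 *	}
 *
 *	ret = mp_run_on_cpus(MP_SELECT_ALL, do_enable_feature, NULL);
 *	if (ret)
 *		return ret;
 */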

/**
 * mp_park_aps() - Park the APs ready for the OS
 *
 * This halts all CPUs except the main one, ready for the OS to use them
 *
 * @return 0 if OK, -ve on error
 */
int mp_park_aps(void);

/**
 * mp_first_cpu() - Get the first CPU to process, from a selection
 *
 * This is used to iterate through selected CPUs. Call this function first,
 * then call mp_next_cpu() repeatedly (with the same @cpu_select) until it
 * returns -EFBIG.
 *
 * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
 * @return first CPU number to run on (e.g. 0)
 */
int mp_first_cpu(int cpu_select);

/**
 * mp_next_cpu() - Get the next CPU to process, from a selection
 *
 * This is used to iterate through selected CPUs. After first calling
 * mp_first_cpu() once, call this function repeatedly until it returns -EFBIG.
 *
 * The value of @cpu_select must be the same for all calls and must match the
 * value passed to mp_first_cpu(), otherwise the behaviour is undefined.
 *
 * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
 * @prev_cpu: Previous value returned by mp_first_cpu()/mp_next_cpu()
 * @return next CPU number to run on (e.g. 0), or -EFBIG when there are no
 *	more CPUs to process
 */
int mp_next_cpu(int cpu_select, int prev_cpu);
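
/*
 * Example: a minimal sketch of iterating over a selection of CPUs. The loop
 * shape (mp_first_cpu(), then mp_next_cpu() until a -ve value such as
 * -EFBIG is returned) is the intended usage; the body is illustrative only.
 *
 *	int cpu;
 *
 *	for (cpu = mp_first_cpu(cpu_select); cpu >= 0;
 *	     cpu = mp_next_cpu(cpu_select, cpu))
 *		printf("selected CPU %d\n", cpu);
 */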
#else
static inline int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg)
{
	/* There is only one CPU, so just call the function here */
	func(arg);

	return 0;
}

static inline int mp_park_aps(void)
{
	/* No APs to park */

	return 0;
}

static inline int mp_first_cpu(int cpu_select)
{
	/* We cannot run on any APs, nor a selected CPU */
	return cpu_select == MP_SELECT_APS ? -EFBIG : MP_SELECT_BSP;
}

static inline int mp_next_cpu(int cpu_select, int prev_cpu)
{
	/*
	 * When MP is not enabled, there is only one CPU and it was already
	 * handled by mp_first_cpu()
	 */
	return -EFBIG;
}

#endif

#endif /* _X86_MP_H_ */