/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015 Google, Inc
 *
 * Taken from coreboot file of the same name
 */

#ifndef _X86_MP_H_
#define _X86_MP_H_

#include <asm/atomic.h>
#include <asm/cache.h>
#include <linux/errno.h>	/* for -EFBIG used by the inline stubs below */

enum {
	/* Indicates that the function should run on all CPUs */
	MP_SELECT_ALL = -1,

	/* Run on the boot CPU */
	MP_SELECT_BSP = -2,

	/* Run on the non-boot CPUs (APs) */
	MP_SELECT_APS = -3,
};

typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);

/*
 * An mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP to coordinate sequencing. Each flight record either
 * holds the APs at a barrier before they call their callback, or lets them
 * run the callback without waiting. In either case, each AP increments the
 * record's cpus_entered field when it enters the record. When the BSP
 * observes that cpus_entered matches the number of APs, it calls bsp_call
 * with bsp_arg and, on return, releases the barrier so that the APs can
 * make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * corresponding callback is simply not called.
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	mp_callback_t ap_call;
	void *ap_arg;
	mp_callback_t bsp_call;
	void *bsp_arg;
} __attribute__((aligned(ARCH_DMA_MINALIGN)));

#define MP_FLIGHT_RECORD(barrier_, ap_func_, ap_arg_, bsp_func_, bsp_arg_) \
	{							\
		.barrier = ATOMIC_INIT(barrier_),		\
		.cpus_entered = ATOMIC_INIT(0),			\
		.ap_call = ap_func_,				\
		.ap_arg = ap_arg_,				\
		.bsp_call = bsp_func_,				\
		.bsp_arg = bsp_arg_,				\
	}

#define MP_FR_BLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(0, ap_func, ap_arg, bsp_func, bsp_arg)

#define MP_FR_NOBLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(1, ap_func, ap_arg, bsp_func, bsp_arg)

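/*
 * Example flight plan (an illustrative sketch only; ap_setup() and
 * bsp_collect() are hypothetical callbacks, not part of this header).
 * Records are executed in order: with MP_FR_BLOCK_APS the APs wait at the
 * barrier and only run ap_setup() after bsp_collect() has returned on the
 * BSP; with MP_FR_NOBLOCK_APS the APs run their callback without waiting.
 *
 *	static int ap_setup(struct udevice *cpu, void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	static int bsp_collect(struct udevice *cpu, void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	static struct mp_flight_record mp_steps[] = {
 *		MP_FR_BLOCK_APS(ap_setup, NULL, bsp_collect, NULL),
 *		MP_FR_NOBLOCK_APS(ap_setup, NULL, NULL, NULL),
 *	};
 */
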
/*
 * mp_init() sets up the SIPI vector and brings up the APs, executing each
 * flight record in turn according to the flight plan. Note that the MP
 * infrastructure uses the SMM default area without saving it. It is up to
 * the chipset or mainboard to either reserve this area via e820 or to save
 * it before calling mp_init() and restore it after mp_init() returns.
 *
 * When mp_init() is called, the MTRR MSRs are mirrored into the APs and
 * caching is enabled before the flight plan is run.
 *
 * MP init has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of CPU numbers and APIC IDs is not deterministic.
 *    Therefore, one cannot rely on this ordering, or on the order of devices
 *    in the device tree, unless the chipset or mainboard knows the APIC IDs
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
int mp_init(void);

/* Set up additional CPUs */
int x86_mp_init(void);

/**
 * mp_run_func() - Function to call on the AP
 *
 * @arg: Argument to pass
 */
typedef void (*mp_run_func)(void *arg);

#if defined(CONFIG_SMP) && !CONFIG_IS_ENABLED(X86_64)
/**
 * mp_run_on_cpus() - Run a function on one or all CPUs
 *
 * This does not return until all CPUs have completed the work.
 *
 * Running on anything other than the boot CPU is only supported if
 * CONFIG_SMP_AP_WORK is enabled.
 *
 * @cpu_select: CPU to run on (its dev->req_seq value), or MP_SELECT_ALL for
 *	all, or MP_SELECT_BSP for the BSP
 * @func: Function to run
 * @arg: Argument to pass to the function
 * @return 0 on success, -ve on error
 */
int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg);
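
/*
 * Example (an illustrative sketch only; do_cpu_work() is a hypothetical
 * function, not part of this header). The per-CPU work goes in
 * do_cpu_work(), which is run on every CPU before the call returns:
 *
 *	static void do_cpu_work(void *arg)
 *	{
 *	}
 *
 *	int ret = mp_run_on_cpus(MP_SELECT_ALL, do_cpu_work, NULL);
 *
 *	if (ret)
 *		printf("MP work failed: %d\n", ret);
 */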

/**
 * mp_park_aps() - Park the APs ready for the OS
 *
 * This halts all CPUs except the main one, ready for the OS to use them.
 *
 * @return 0 if OK, -ve on error
 */
int mp_park_aps(void);

/**
 * mp_first_cpu() - Get the first CPU to process, from a selection
 *
 * This is used to iterate through selected CPUs. Call this function first,
 * then call mp_next_cpu() repeatedly (with the same @cpu_select) until it
 * returns -EFBIG.
 *
 * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
 * @return first CPU number to process (e.g. 0)
 */
int mp_first_cpu(int cpu_select);

/**
 * mp_next_cpu() - Get the next CPU to process, from a selection
 *
 * This is used to iterate through selected CPUs. After calling mp_first_cpu()
 * once, call this function repeatedly until it returns -EFBIG.
 *
 * The value of @cpu_select must be the same for all calls and must match the
 * value passed to mp_first_cpu(), otherwise the behaviour is undefined.
 *
 * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
 * @prev_cpu: Previous value returned by mp_first_cpu()/mp_next_cpu()
 * @return next CPU number to run on (e.g. 0), or -EFBIG when there are no
 *	more CPUs to process
 */
int mp_next_cpu(int cpu_select, int prev_cpu);
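
/*
 * Example iteration (an illustrative sketch only; process_cpu() is a
 * hypothetical function, not part of this header). The loop ends once
 * mp_next_cpu() returns -EFBIG; note that on non-SMP builds the value
 * returned may be MP_SELECT_BSP rather than a CPU number:
 *
 *	int cpu;
 *
 *	for (cpu = mp_first_cpu(MP_SELECT_ALL); cpu != -EFBIG;
 *	     cpu = mp_next_cpu(MP_SELECT_ALL, cpu))
 *		process_cpu(cpu);
 */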
#else
static inline int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg)
{
	/* There is only one CPU, so just call the function here */
	func(arg);

	return 0;
}

static inline int mp_park_aps(void)
{
	/* No APs to park */

	return 0;
}

static inline int mp_first_cpu(int cpu_select)
{
	/*
	 * There are no APs to run on; any other selection resolves to the
	 * boot CPU
	 */
	return cpu_select == MP_SELECT_APS ? -EFBIG : MP_SELECT_BSP;
}

static inline int mp_next_cpu(int cpu_select, int prev_cpu)
{
	/*
	 * When MP is not enabled there is only one CPU, and it has already
	 * been returned by mp_first_cpu()
	 */
	return -EFBIG;
}

#endif

#endif /* _X86_MP_H_ */