/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015 Google, Inc
 *
 * Taken from coreboot file of the same name
 */

#ifndef _X86_MP_H_
#define _X86_MP_H_

#include <asm/atomic.h>
#include <asm/cache.h>
#include <linux/bitops.h>

struct udevice;

enum {
	/*
	 * Indicates that the function should run on all CPUs. We use a large
	 * number, above the number of real CPUs we expect to find.
	 */
	MP_SELECT_ALL = BIT(16),

	/* Run on the boot CPU (BSP) */
	MP_SELECT_BSP,

	/* Run on the non-boot CPUs (APs) */
	MP_SELECT_APS,
};

typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);

/*
 * A struct mp_flight_record describes one step in a sequence of calls for the
 * APs to perform, with the BSP coordinating the sequencing. Each flight
 * record either holds the APs at a barrier before they run the callback, or
 * lets them run the callback without waiting. Either way, each AP increments
 * the record's cpus_entered field. When the BSP observes that cpus_entered
 * matches the number of APs, it calls bsp_call with bsp_arg and, on
 * returning, releases the barrier so the APs can make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In that case the callback
 * is simply not called.
 *
 * @barrier: Ensures that the BSP and AP don't run the flight record at the
 *	same time
 * @cpus_entered: Counts the number of APs that have run this record
 * @ap_call: Function for the APs to call
 * @ap_arg: Argument to pass to @ap_call
 * @bsp_call: Function for the BSP to call
 * @bsp_arg: Argument to pass to @bsp_call
 */
struct mp_flight_record {
	atomic_t barrier;
	atomic_t cpus_entered;
	mp_callback_t ap_call;
	void *ap_arg;
	mp_callback_t bsp_call;
	void *bsp_arg;
} __attribute__((aligned(ARCH_DMA_MINALIGN)));

#define MP_FLIGHT_RECORD(barrier_, ap_func_, ap_arg_, bsp_func_, bsp_arg_) \
	{							\
		.barrier = ATOMIC_INIT(barrier_),		\
		.cpus_entered = ATOMIC_INIT(0),			\
		.ap_call = ap_func_,				\
		.ap_arg = ap_arg_,				\
		.bsp_call = bsp_func_,				\
		.bsp_arg = bsp_arg_,				\
	}

#define MP_FR_BLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(0, ap_func, ap_arg, bsp_func, bsp_arg)

#define MP_FR_NOBLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(1, ap_func, ap_arg, bsp_func, bsp_arg)
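
/*
 * Illustrative sketch (not part of this header): a flight plan is an array of
 * flight records which mp_init() executes in order. The callbacks shown here
 * (ap_load_microcode() and bsp_gather_info()) are hypothetical; the real plan
 * is built inside the MP implementation, not here.
 *
 *	static struct mp_flight_record mp_steps[] = {
 *		// APs wait at the barrier until the BSP's callback has run
 *		MP_FR_BLOCK_APS(ap_load_microcode, NULL, bsp_gather_info, NULL),
 *		// APs run their callback immediately, without waiting
 *		MP_FR_NOBLOCK_APS(ap_load_microcode, NULL, NULL, NULL),
 *	};
 */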

/*
 * mp_init() will set up the SIPI vector and bring up the APs. Each flight
 * record is executed according to the plan. Note that the MP infrastructure
 * uses the SMM default area without saving it. It is up to the chipset or
 * mainboard to either reserve this area in the e820 map or save the region
 * prior to calling mp_init() and restore it after mp_init() returns.
 *
 * As part of mp_init(), the MTRR MSRs are mirrored into the APs and caching
 * is enabled on them before the flight plan is run.
 *
 * MP init has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of CPU numbers and APIC IDs is not deterministic.
 *    Therefore, one cannot rely on this ordering, or on the order of devices
 *    in the device tree, unless the chipset or mainboard knows the APIC IDs
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
int mp_init(void);

/**
 * x86_mp_init() - Set up additional CPUs
 *
 * Return: 0 on success, -ve on error
 */
int x86_mp_init(void);

/**
 * mp_run_func() - Function to call on the AP
 *
 * @arg: Argument to pass
 */
typedef void (*mp_run_func)(void *arg);

#if CONFIG_IS_ENABLED(SMP) && !CONFIG_IS_ENABLED(X86_64)
/**
 * mp_run_on_cpus() - Run a function on one or all CPUs
 *
 * This does not return until all CPUs have completed the work
 *
 * Running on anything other than the boot CPU is only supported if
 * CONFIG_SMP_AP_WORK is enabled
 *
 * @cpu_select: CPU to run on (its dev_seq() value), or MP_SELECT_ALL for
 *	all, or MP_SELECT_BSP for the BSP
 * @func: Function to run
 * @arg: Argument to pass to the function
 * Return: 0 on success, -ve on error
 */
int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg);
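
/*
 * Usage sketch (illustrative only): the callback and variable names below are
 * hypothetical. With CONFIG_SMP_AP_WORK enabled a function can be run on
 * every CPU, or on a single CPU selected by its dev_seq() value:
 *
 *	static void apply_setting(void *arg)
 *	{
 *		struct my_setting *set = arg;
 *		// ... apply the setting on the calling CPU ...
 *	}
 *
 *	ret = mp_run_on_cpus(MP_SELECT_ALL, apply_setting, &setting);
 *	if (ret)
 *		return ret;
 */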

/**
 * mp_park_aps() - Park the APs ready for the OS
 *
 * This halts all CPUs except the main one, ready for the OS to use them
 *
 * Return: 0 if OK, -ve on error
 */
int mp_park_aps(void);

/**
 * mp_first_cpu() - Get the first CPU to process, from a selection
 *
 * This is used to iterate through selected CPUs. Call this function first,
 * then call mp_next_cpu() repeatedly (with the same @cpu_select) until it
 * returns -EFBIG.
 *
 * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
 * Return: first CPU number to run on (e.g. 0)
 */
int mp_first_cpu(int cpu_select);

/**
 * mp_next_cpu() - Get the next CPU to process, from a selection
 *
 * This is used to iterate through selected CPUs. After calling mp_first_cpu()
 * once, call this function repeatedly until it returns -EFBIG.
 *
 * The value of @cpu_select must be the same for all calls and must match the
 * value passed to mp_first_cpu(), otherwise the behaviour is undefined.
 *
 * @cpu_select: Selected CPUs (either a CPU number or MP_SELECT_...)
 * @prev_cpu: Previous value returned by mp_first_cpu()/mp_next_cpu()
 * Return: next CPU number to run on (e.g. 0)
 */
int mp_next_cpu(int cpu_select, int prev_cpu);
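
/*
 * Iteration sketch (illustrative only; do_work() is a hypothetical helper):
 * walk every CPU matched by the selection until -EFBIG is returned:
 *
 *	int cpu;
 *
 *	for (cpu = mp_first_cpu(MP_SELECT_ALL); cpu != -EFBIG;
 *	     cpu = mp_next_cpu(MP_SELECT_ALL, cpu))
 *		do_work(cpu);
 */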
#else
static inline int mp_run_on_cpus(int cpu_select, mp_run_func func, void *arg)
{
	/* There is only one CPU, so just call the function here */
	func(arg);

	return 0;
}

static inline int mp_park_aps(void)
{
	/* No APs to park */

	return 0;
}

static inline int mp_first_cpu(int cpu_select)
{
	/* We cannot run on any APs, nor a selected CPU */
	return cpu_select == MP_SELECT_APS ? -EFBIG : MP_SELECT_BSP;
}

static inline int mp_next_cpu(int cpu_select, int prev_cpu)
{
	/*
	 * When MP is not enabled there is only one CPU, and it was already
	 * returned by mp_first_cpu()
	 */
	return -EFBIG;
}

#endif

#endif /* _X86_MP_H_ */