// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2014 Google, Inc
 *
 * Memory Type Range Registers - these are used to tell the CPU whether
 * memory is cacheable and if so the cache write mode to use.
 *
 * These can speed up booting. See the mtrr command.
 *
 * Reference: Intel Architecture Software Developer's Manual, Volume 3:
 * System Programming
 */

/*
 * Note that any console output (e.g. debug()) in this file will likely fail
 * since the MTRR registers are sometimes in flux.
 */

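/*
 * A minimal usage sketch (illustrative only; real call sites live in
 * platform code and use the platform's actual memory map):
 *
 *	mtrr_add_request(MTRR_TYPE_WRBACK, 0, gd->ram_size);
 *	mtrr_commit(true);
 */
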
#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <sort.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/mp.h>
#include <asm/msr.h>
#include <asm/mtrr.h>

DECLARE_GLOBAL_DATA_PTR;

/* Prepare to adjust MTRRs */
void mtrr_open(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	if (do_caches) {
		state->enable_cache = dcache_status();

		if (state->enable_cache)
			disable_caches();
	}
	state->deftype = native_read_msr(MTRR_DEF_TYPE_MSR);
	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype & ~MTRR_DEF_TYPE_EN);
}

/* Clean up after adjusting MTRRs, and enable them */
void mtrr_close(struct mtrr_state *state, bool do_caches)
{
	if (!gd->arch.has_mtrr)
		return;

	wrmsrl(MTRR_DEF_TYPE_MSR, state->deftype | MTRR_DEF_TYPE_EN);
	if (do_caches && state->enable_cache)
		enable_caches();
}

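/*
 * Program a single variable MTRR: the base MSR holds the start address and
 * memory type; the mask MSR selects the address bits that must match,
 * clipped to the CPU's physical-address width (CONFIG_CPU_ADDR_BITS)
 */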
static void set_var_mtrr(uint reg, uint type, uint64_t start, uint64_t size)
{
	u64 mask;

	wrmsrl(MTRR_PHYS_BASE_MSR(reg), start | type);
	mask = ~(size - 1);
	mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
	wrmsrl(MTRR_PHYS_MASK_MSR(reg), mask | MTRR_PHYS_MASK_VALID);
}

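/* Read the base/mask MSR pair of each variable MTRR into @info */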
void mtrr_read_all(struct mtrr_info *info)
{
	int reg_count = mtrr_get_var_count();
	int i;

	for (i = 0; i < reg_count; i++) {
		info->mtrr[i].base = native_read_msr(MTRR_PHYS_BASE_MSR(i));
		info->mtrr[i].mask = native_read_msr(MTRR_PHYS_MASK_MSR(i));
	}
}

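/* Write the base/mask MSR pair of each variable MTRR from @info */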
void mtrr_write_all(struct mtrr_info *info)
{
	int reg_count = mtrr_get_var_count();
	struct mtrr_state state;
	int i;

	for (i = 0; i < reg_count; i++) {
		mtrr_open(&state, true);
		wrmsrl(MTRR_PHYS_BASE_MSR(i), info->mtrr[i].base);
		wrmsrl(MTRR_PHYS_MASK_MSR(i), info->mtrr[i].mask);
		mtrr_close(&state, true);
	}
}

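/* Callback for mp_run_on_cpus() to write all MTRRs on the running CPU */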
static void write_mtrrs(void *arg)
{
	struct mtrr_info *info = arg;

	mtrr_write_all(info);
}

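/* Callback for mp_run_on_cpus() to read all MTRRs on the running CPU */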
static void read_mtrrs(void *arg)
{
	struct mtrr_info *info = arg;

	mtrr_read_all(info);
}

/**
 * mtrr_copy_to_aps() - Copy the MTRRs from the boot CPU to other CPUs
 *
 * Does nothing (and reports success) if multiprocessing is not enabled,
 * which mp_run_on_cpus() signals with -ENXIO.
 *
 * @return 0 on success, -ve on failure
 */
static int mtrr_copy_to_aps(void)
{
	struct mtrr_info info;
	int ret;

	ret = mp_run_on_cpus(MP_SELECT_BSP, read_mtrrs, &info);
	if (ret == -ENXIO)
		return 0;
	else if (ret)
		return log_msg_ret("bsp", ret);

	ret = mp_run_on_cpus(MP_SELECT_APS, write_mtrrs, &info);
	if (ret)
		return log_msg_ret("aps", ret);

	return 0;
}

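/* qsort() comparator which orders MTRR requests by ascending start address */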
static int h_comp_mtrr(const void *p1, const void *p2)
{
	const struct mtrr_request *req1 = p1;
	const struct mtrr_request *req2 = p2;

	s64 diff = req1->start - req2->start;

	return diff < 0 ? -1 : diff > 0 ? 1 : 0;
}

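/*
 * Apply all queued MTRR requests on the boot CPU, clear any remaining
 * variable MTRRs and, once relocated, copy the result to the APs
 */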
int mtrr_commit(bool do_caches)
{
	struct mtrr_request *req = gd->arch.mtrr_req;
	struct mtrr_state state;
	int ret;
	int i;

	debug("%s: enabled=%d, count=%d\n", __func__, gd->arch.has_mtrr,
	      gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	debug("open\n");
	mtrr_open(&state, do_caches);
	debug("open done\n");
	qsort(req, gd->arch.mtrr_req_count, sizeof(*req), h_comp_mtrr);
	for (i = 0; i < gd->arch.mtrr_req_count; i++, req++)
		set_var_mtrr(i, req->type, req->start, req->size);

	/* Clear the ones that are unused */
	debug("clear\n");
	for (; i < mtrr_get_var_count(); i++)
		wrmsrl(MTRR_PHYS_MASK_MSR(i), 0);
	debug("close\n");
	mtrr_close(&state, do_caches);
	debug("mtrr done\n");

	if (gd->flags & GD_FLG_RELOC) {
		ret = mtrr_copy_to_aps();
		if (ret)
			return log_msg_ret("copy", ret);
	}

	return 0;
}

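/* Queue an MTRR request to be applied by a later mtrr_commit() */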
int mtrr_add_request(int type, uint64_t start, uint64_t size)
{
	struct mtrr_request *req;
	uint64_t mask;

	debug("%s: count=%d\n", __func__, gd->arch.mtrr_req_count);
	if (!gd->arch.has_mtrr)
		return -ENOSYS;

	if (gd->arch.mtrr_req_count == MAX_MTRR_REQUESTS)
		return -ENOSPC;
	req = &gd->arch.mtrr_req[gd->arch.mtrr_req_count++];
	req->type = type;
	req->start = start;
	req->size = size;
	debug("%d: type=%d, %08llx %08llx\n", gd->arch.mtrr_req_count - 1,
	      req->type, req->start, req->size);
	mask = ~(req->size - 1);
	mask &= (1ULL << CONFIG_CPU_ADDR_BITS) - 1;
	mask |= MTRR_PHYS_MASK_VALID;
	debug(" %016llx %016llx\n", req->start | req->type, mask);

	return 0;
}

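/* Return the number of variable MTRRs, as reported by the capability MSR */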
int mtrr_get_var_count(void)
{
	return msr_read(MSR_MTRR_CAP_MSR).lo & MSR_MTRR_CAP_VCNT;
}

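/* Find the first variable MTRR whose mask is not marked valid */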
static int get_free_var_mtrr(void)
{
	struct msr_t maskm;
	int vcnt;
	int i;

	vcnt = mtrr_get_var_count();

	/* Identify the first var mtrr which is not valid */
	for (i = 0; i < vcnt; i++) {
		maskm = msr_read(MTRR_PHYS_MASK_MSR(i));
		if ((maskm.lo & MTRR_PHYS_MASK_VALID) == 0)
			return i;
	}

	/* No free var mtrr */
	return -ENOSPC;
}

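/* Program the next free variable MTRR, or return -ENOSPC if none is free */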
int mtrr_set_next_var(uint type, uint64_t start, uint64_t size)
{
	int mtrr;

	mtrr = get_free_var_mtrr();
	if (mtrr < 0)
		return mtrr;

	set_var_mtrr(mtrr, type, start, size);
	debug("MTRR %x: start=%llx, size=%llx\n", mtrr, start, size);

	return 0;
}

/** enum mtrr_opcode - supported operations for mtrr_do_oper() */
enum mtrr_opcode {
	MTRR_OP_SET,
	MTRR_OP_SET_VALID,
};

/**
 * struct mtrr_oper - An MTRR operation to perform on a CPU
 *
 * @opcode: Indicates operation to perform
 * @reg: MTRR reg number to select (0-7, -1 = all)
 * @valid: Valid value to write for MTRR_OP_SET_VALID
 * @base: Base value to write for MTRR_OP_SET
 * @mask: Mask value to write for MTRR_OP_SET
 */
struct mtrr_oper {
	enum mtrr_opcode opcode;
	int reg;
	bool valid;
	u64 base;
	u64 mask;
};

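/* Perform an MTRR operation on the CPU that this function runs on */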
static void mtrr_do_oper(void *arg)
{
	struct mtrr_oper *oper = arg;
	u64 mask;

	switch (oper->opcode) {
	case MTRR_OP_SET_VALID:
		mask = native_read_msr(MTRR_PHYS_MASK_MSR(oper->reg));
		if (oper->valid)
			mask |= MTRR_PHYS_MASK_VALID;
		else
			mask &= ~MTRR_PHYS_MASK_VALID;
		wrmsrl(MTRR_PHYS_MASK_MSR(oper->reg), mask);
		break;
	case MTRR_OP_SET:
		wrmsrl(MTRR_PHYS_BASE_MSR(oper->reg), oper->base);
		wrmsrl(MTRR_PHYS_MASK_MSR(oper->reg), oper->mask);
		break;
	}
}

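/*
 * Run an MTRR operation on the selected CPU(s), temporarily disabling
 * MTRRs and caches on the calling CPU while it runs
 */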
static int mtrr_start_op(int cpu_select, struct mtrr_oper *oper)
{
	struct mtrr_state state;
	int ret;

	mtrr_open(&state, true);
	ret = mp_run_on_cpus(cpu_select, mtrr_do_oper, oper);
	mtrr_close(&state, true);
	if (ret)
		return log_msg_ret("run", ret);

	return 0;
}

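/* Set or clear the valid bit of variable MTRR @reg on the selected CPU(s) */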
int mtrr_set_valid(int cpu_select, int reg, bool valid)
{
	struct mtrr_oper oper;

	oper.opcode = MTRR_OP_SET_VALID;
	oper.reg = reg;
	oper.valid = valid;

	return mtrr_start_op(cpu_select, &oper);
}

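/* Write the base and mask MSRs of variable MTRR @reg on the selected CPU(s) */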
int mtrr_set(int cpu_select, int reg, u64 base, u64 mask)
{
	struct mtrr_oper oper;

	oper.opcode = MTRR_OP_SET;
	oper.reg = reg;
	oper.base = base;
	oper.mask = mask;

	return mtrr_start_op(cpu_select, &oper);
}