/*
 * Copyright (C) 2015 Google, Inc
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Based on code from the coreboot file of the same name
 */

#include <common.h>
#include <cpu.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/interrupt.h>
#include <asm/lapic.h>
#include <asm/mp.h>
#include <asm/mtrr.h>
#include <asm/sipi.h>
#include <dm/device-internal.h>
#include <dm/uclass-internal.h>
#include <linux/linkage.h>

/* Needed for the gd->arch.gdt reference below */
DECLARE_GLOBAL_DATA_PTR;

/* This also needs to match the sipi.S assembly code for saved MSR encoding */
struct saved_msr {
	uint32_t index;
	uint32_t lo;
	uint32_t hi;
} __packed;

struct mp_flight_plan {
	int num_records;
	struct mp_flight_record *records;
};

static struct mp_flight_plan mp_info;

struct cpu_map {
	struct udevice *dev;
	int apic_id;
	int err_code;
};

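/* Spin until the barrier is released (set non-zero), then fence */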
static inline void barrier_wait(atomic_t *b)
{
	while (atomic_read(b) == 0)
		asm("pause");
	mfence();
}

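/* Fence prior writes, then release the barrier by setting it non-zero */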
static inline void release_barrier(atomic_t *b)
{
	mfence();
	atomic_set(b, 1);
}

/* Returns 1 on timeout waiting for APs, 0 if the target number checked in */
static int wait_for_aps(atomic_t *val, int target, int total_delay,
			int delay_step)
{
	int timeout = 0;
	int delayed = 0;

	while (atomic_read(val) != target) {
		udelay(delay_step);
		delayed += delay_step;
		if (delayed >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

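/*
 * Walk the flight plan on an AP: for each record, report arrival, wait for
 * the BSP to release the barrier, then run the AP callback if there is one
 */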
static void ap_do_flight_plan(struct udevice *cpu)
{
	int i;

	for (i = 0; i < mp_info.num_records; i++) {
		struct mp_flight_record *rec = &mp_info.records[i];

		atomic_inc(&rec->cpus_entered);
		barrier_wait(&rec->barrier);

		if (rec->ap_call != NULL)
			rec->ap_call(cpu, rec->ap_arg);
	}
}

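/*
 * Find the CPU device whose device-tree cpu_id matches the given local APIC
 * ID, returning 0 and filling in *devp on success, -ENOENT if none matches
 */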
static int find_cpu_by_apic_id(int apic_id, struct udevice **devp)
{
	struct udevice *dev;

	*devp = NULL;
	for (uclass_find_first_device(UCLASS_CPU, &dev);
	     dev;
	     uclass_find_next_device(&dev)) {
		struct cpu_platdata *plat = dev_get_parent_platdata(dev);

		if (plat->cpu_id == apic_id) {
			*devp = dev;
			return 0;
		}
	}

	return -ENOENT;
}

/*
 * By the time the APs call ap_init(), caching has been set up and microcode
 * has been loaded
 */
static void ap_init(unsigned int cpu_index)
{
	struct udevice *dev;
	int apic_id;
	int ret;

	/* Ensure the local APIC is enabled */
	enable_lapic();

	apic_id = lapicid();
	ret = find_cpu_by_apic_id(apic_id, &dev);
	if (ret) {
		debug("Unknown CPU apic_id %x\n", apic_id);
		goto done;
	}

	debug("AP: slot %d apic_id %x, dev %s\n", cpu_index, apic_id,
	      dev ? dev->name : "(apic_id not found)");

	/* Walk the flight plan */
	ap_do_flight_plan(dev);

	/* Park the AP */
	debug("parking\n");
done:
	stop_this_cpu();
}

static const unsigned int fixed_mtrrs[NUM_FIXED_MTRRS] = {
	MTRR_FIX_64K_00000_MSR, MTRR_FIX_16K_80000_MSR, MTRR_FIX_16K_A0000_MSR,
	MTRR_FIX_4K_C0000_MSR, MTRR_FIX_4K_C8000_MSR, MTRR_FIX_4K_D0000_MSR,
	MTRR_FIX_4K_D8000_MSR, MTRR_FIX_4K_E0000_MSR, MTRR_FIX_4K_E8000_MSR,
	MTRR_FIX_4K_F0000_MSR, MTRR_FIX_4K_F8000_MSR,
};

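/* Save a single MSR into @entry and return a pointer to the next free slot */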
static inline struct saved_msr *save_msr(int index, struct saved_msr *entry)
{
	msr_t msr;

	msr = msr_read(index);
	entry->index = index;
	entry->lo = msr.lo;
	entry->hi = msr.hi;

	/* Return the next entry */
	entry++;
	return entry;
}

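/*
 * Save the BSP's fixed and variable MTRRs, plus IA32_MTRR_DEF_TYPE, into the
 * given buffer so that the APs can mirror them. Returns the number of MSRs
 * saved, or -ENOSPC if the buffer is too small.
 */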
static int save_bsp_msrs(char *start, int size)
{
	int msr_count;
	int num_var_mtrrs;
	struct saved_msr *msr_entry;
	int i;
	msr_t msr;

	/* Determine the number of MTRRs that need to be saved */
	msr = msr_read(MTRR_CAP_MSR);
	num_var_mtrrs = msr.lo & 0xff;

	/* 2 * num_var_mtrrs for base and mask, +1 for IA32_MTRR_DEF_TYPE */
	msr_count = 2 * num_var_mtrrs + NUM_FIXED_MTRRS + 1;

	if ((msr_count * sizeof(struct saved_msr)) > size) {
		printf("Cannot mirror all %d msrs\n", msr_count);
		return -ENOSPC;
	}

	msr_entry = (void *)start;
	for (i = 0; i < NUM_FIXED_MTRRS; i++)
		msr_entry = save_msr(fixed_mtrrs[i], msr_entry);

	for (i = 0; i < num_var_mtrrs; i++) {
		msr_entry = save_msr(MTRR_PHYS_BASE_MSR(i), msr_entry);
		msr_entry = save_msr(MTRR_PHYS_MASK_MSR(i), msr_entry);
	}

	msr_entry = save_msr(MTRR_DEF_TYPE_MSR, msr_entry);

	return msr_count;
}

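/*
 * Copy the 16-bit start-up code to AP_DEFAULT_BASE and fill in the
 * parameters it needs: GDT, IDT, per-AP stacks, the saved BSP MSRs and the
 * 32-bit C entry point. On success *ap_countp points at the atomic counter
 * that the APs increment as they start up.
 */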
static int load_sipi_vector(atomic_t **ap_countp)
{
	struct sipi_params_16bit *params16;
	struct sipi_params *params;
	static char msr_save[512];
	char *stack;
	ulong addr;
	int code_len;
	int size;
	int ret;

	/* Copy in the code */
	code_len = ap_start16_code_end - ap_start16;
	debug("Copying SIPI code to %x: %d bytes\n", AP_DEFAULT_BASE,
	      code_len);
	memcpy((void *)AP_DEFAULT_BASE, ap_start16, code_len);

	addr = AP_DEFAULT_BASE + (ulong)sipi_params_16bit - (ulong)ap_start16;
	params16 = (struct sipi_params_16bit *)addr;
	params16->ap_start = (uint32_t)ap_start;
	params16->gdt = (uint32_t)gd->arch.gdt;
	params16->gdt_limit = X86_GDT_SIZE - 1;
	debug("gdt = %x, gdt_limit = %x\n", params16->gdt, params16->gdt_limit);

	params = (struct sipi_params *)sipi_params;
	debug("SIPI 32-bit params at %p\n", params);
	params->idt_ptr = (uint32_t)x86_get_idt();

	/* Allocate 4KiB-aligned stacks, one per CPU */
	params->stack_size = CONFIG_AP_STACK_SIZE;
	size = params->stack_size * CONFIG_MAX_CPUS;
	stack = memalign(4096, size);
	if (!stack)
		return -ENOMEM;
	params->stack_top = (u32)(stack + size);

	params->microcode_ptr = 0;
	params->msr_table_ptr = (u32)msr_save;
	ret = save_bsp_msrs(msr_save, sizeof(msr_save));
	if (ret < 0)
		return ret;
	params->msr_count = ret;

	params->c_handler = (uint32_t)&ap_init;

	*ap_countp = &params->ap_count;
	atomic_set(*ap_countp, 0);
	debug("SIPI vector is ready\n");

	return 0;
}

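/* Check that the device tree binds a CPU device for each expected CPU */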
static int check_cpu_devices(int expected_cpus)
{
	int i;

	for (i = 0; i < expected_cpus; i++) {
		struct udevice *dev;
		int ret;

		ret = uclass_find_device(UCLASS_CPU, i, &dev);
		if (ret) {
			debug("Cannot find CPU %d in device tree\n", i);
			return ret;
		}
	}

	return 0;
}

/* Returns 1 on timeout, 0 once the lapic ICR is no longer busy */
static int apic_wait_timeout(int total_delay, int delay_step)
{
	int total = 0;
	int timeout = 0;

	while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
		udelay(delay_step);
		total += delay_step;
		if (total >= total_delay) {
			timeout = 1;
			break;
		}
	}

	return timeout;
}

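/*
 * Start the APs with the standard INIT/SIPI/SIPI sequence: send INIT to all
 * CPUs but self, wait 10ms, then send two start-up IPIs pointing at the SIPI
 * vector, waiting for @ap_count APs to check in. Returns 0 on success, -1 on
 * timeout.
 */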
static int start_aps(int ap_count, atomic_t *num_aps)
{
	int sipi_vector;
	/* Max location is 4KiB below 1MiB */
	const int max_vector_loc = ((1 << 20) - (1 << 12)) >> 12;

	if (ap_count == 0)
		return 0;

	/* The vector is sent as a 4KiB-aligned address in one byte */
	sipi_vector = AP_DEFAULT_BASE >> 12;

	if (sipi_vector > max_vector_loc) {
		printf("SIPI vector too large! 0x%08x\n", sipi_vector);
		return -1;
	}

	debug("Attempting to start %d APs\n", ap_count);

	if (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
		debug("Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000, 50)) {
			debug("timed out. Aborting.\n");
			return -1;
		}
		debug("done.\n");
	}

	/* Send INIT IPI to all but self */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_INIT);
	debug("Waiting for 10ms after sending INIT.\n");
	mdelay(10);

	/* Send 1st SIPI */
	if (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
		debug("Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000, 50)) {
			debug("timed out. Aborting.\n");
			return -1;
		}
		debug("done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_STARTUP | sipi_vector);
	debug("Waiting for 1st SIPI to complete...");
	if (apic_wait_timeout(10000, 50)) {
		debug("timed out.\n");
		return -1;
	}
	debug("done.\n");

	/* Wait for CPUs to check in up to 200us */
	wait_for_aps(num_aps, ap_count, 200, 15);

	/* Send 2nd SIPI */
	if (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY) {
		debug("Waiting for ICR not to be busy...");
		if (apic_wait_timeout(1000, 50)) {
			debug("timed out. Aborting.\n");
			return -1;
		}
		debug("done.\n");
	}

	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(0));
	lapic_write_around(LAPIC_ICR, LAPIC_DEST_ALLBUT | LAPIC_INT_ASSERT |
			   LAPIC_DM_STARTUP | sipi_vector);
	debug("Waiting for 2nd SIPI to complete...");
	if (apic_wait_timeout(10000, 50)) {
		debug("timed out.\n");
		return -1;
	}
	debug("done.\n");

	/* Wait for CPUs to check in */
	if (wait_for_aps(num_aps, ap_count, 10000, 50)) {
		debug("Not all APs checked in: %d/%d\n",
		      atomic_read(num_aps), ap_count);
		return -1;
	}

	return 0;
}

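/*
 * Execute the flight plan on the BSP: for each record, wait for the APs to
 * check in (unless the barrier was already released), run the BSP callback
 * if any, then release the barrier so the APs can move on
 */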
static int bsp_do_flight_plan(struct udevice *cpu, struct mp_params *mp_params)
{
	int i;
	int ret = 0;
	const int timeout_us = 100000;
	const int step_us = 100;
	int num_aps = mp_params->num_cpus - 1;

	for (i = 0; i < mp_params->num_records; i++) {
		struct mp_flight_record *rec = &mp_params->flight_plan[i];

		/* Wait for APs if the record is not released */
		if (atomic_read(&rec->barrier) == 0) {
			/* Wait for the APs to check in */
			if (wait_for_aps(&rec->cpus_entered, num_aps,
					 timeout_us, step_us)) {
				debug("MP record %d timeout\n", i);
				ret = -1;
			}
		}

		if (rec->bsp_call != NULL)
			rec->bsp_call(cpu, rec->bsp_arg);

		release_barrier(&rec->barrier);
	}

	return ret;
}

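/* Set up the BSP's local APIC and find its CPU device by local APIC ID */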
static int init_bsp(struct udevice **devp)
{
	char processor_name[CPU_MAX_NAME_LEN];
	int apic_id;
	int ret;

	cpu_get_name(processor_name);
	debug("CPU: %s\n", processor_name);

	enable_lapic();

	apic_id = lapicid();
	ret = find_cpu_by_apic_id(apic_id, devp);
	if (ret) {
		printf("Cannot find boot CPU, APIC ID %d\n", apic_id);
		return ret;
	}

	return 0;
}

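/*
 * Set up multi-processor operation: bring up the APs via the SIPI vector and
 * run the caller's flight plan, with the BSP executing the BSP side of each
 * record while the APs execute the AP side
 */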
int mp_init(struct mp_params *p)
{
	int num_aps;
	atomic_t *ap_count;
	struct udevice *cpu;
	struct uclass *uc;
	int ret;

	/* Getting the CPU uclass causes the CPU devices to be bound */
	ret = uclass_get(UCLASS_CPU, &uc);
	if (ret)
		return ret;

	ret = init_bsp(&cpu);
	if (ret) {
		debug("Cannot init boot CPU: err=%d\n", ret);
		return ret;
	}

	if (p == NULL || p->flight_plan == NULL || p->num_records < 1) {
		printf("Invalid MP parameters\n");
		return -1;
	}

	ret = check_cpu_devices(p->num_cpus);
	if (ret)
		debug("Warning: Device tree does not describe all CPUs. Extra ones will not be started correctly\n");

	/* Copy needed parameters so that APs have a reference to the plan */
	mp_info.num_records = p->num_records;
	mp_info.records = p->flight_plan;

	/* Load the SIPI vector */
	ret = load_sipi_vector(&ap_count);
	if (ret || !ap_count)
		return -1;

	/*
	 * Make sure SIPI data hits RAM so the APs that come up will see
	 * the startup code even if the caches are disabled
	 */
	wbinvd();

	/* Start the APs, which check in by incrementing ap_count */
	num_aps = p->num_cpus - 1;
	ret = start_aps(num_aps, ap_count);
	if (ret) {
		mdelay(1000);
		debug("%d/%d eventually checked in?\n", atomic_read(ap_count),
		      num_aps);
		return ret;
	}

	/* Walk the flight plan for the BSP */
	ret = bsp_do_flight_plan(cpu, p);
	if (ret) {
		debug("CPU init failed: err=%d\n", ret);
		return ret;
	}

	return 0;
}

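/* Probe the given CPU device so it is ready for use; @unused is ignored */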
int mp_init_cpu(struct udevice *cpu, void *unused)
{
	return device_probe(cpu);
}