/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015 Google, Inc
 *
 * Taken from coreboot file of the same name
 */

/*
 * The SIPI vector is responsible for initializing the APs in the system. It
 * loads microcode, sets up MSRs, and enables caching before calling into
 * C code.
 */

#include <asm/global_data.h>
#include <asm/msr-index.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/sipi.h>

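/*
 * Segment selector values for U-Boot's 32-bit code and data segments: each
 * is the GDT entry index scaled by the descriptor size, i.e. the byte
 * offset of that entry within the GDT
 */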
#define CODE_SEG	(X86_GDT_ENTRY_32BIT_CS * X86_GDT_ENTRY_SIZE)
#define DATA_SEG	(X86_GDT_ENTRY_32BIT_DS * X86_GDT_ENTRY_SIZE)

/*
 * First we have the 16-bit section. Every AP starts here.
 * The simple task is to load U-Boot's Global Descriptor Table (GDT) to allow
 * U-Boot's 32-bit code to become visible, then jump to ap_start.
 *
 * Note that this code is copied to RAM below 1MB in mp_init.c, and runs from
 * there, but the 32-bit code (ap_start and onwards) is part of U-Boot and
 * is therefore relocated to the top of RAM with other U-Boot code. This
 * means that for the 16-bit code we must write relocatable code, but for the
 * rest, we can do what we like.
 */
.text
.code16
.globl ap_start16
ap_start16:
	cli
	xorl	%eax, %eax
	movl	%eax, %cr3		/* Invalidate TLB */

	/* Set up the data segment */
	movw	%cs, %ax
	movw	%ax, %ds

	/* Use an address relative to the data segment for the GDT */
	movl	$gdtaddr, %ebx
	subl	$ap_start16, %ebx

	data32 lgdt (%ebx)

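	/*
	 * Prepare %cr0 for protected mode: clear paging and the FPU/alignment
	 * related bits, keep the cache disabled for now (CD and NW set) and
	 * set PE; the far jump below completes the switch, and caching is
	 * re-enabled later in ap_start
	 */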
	movl	%cr0, %eax
	andl	$(~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_NE | \
		    X86_CR0_TS | X86_CR0_EM | X86_CR0_MP)), %eax
	orl	$(X86_CR0_NW | X86_CR0_CD | X86_CR0_PE), %eax
	movl	%eax, %cr0

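	/*
	 * Compute the runtime offset of the ap_start_jmp far pointer: this
	 * 16-bit code executes from a copy below 1MB, so addresses must be
	 * made relative to ap_start16 before they are used
	 */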
	movl	$ap_start_jmp, %eax
	subl	$ap_start16, %eax
	movw	%ax, %bp

	/* Jump to ap_start within U-Boot */
data32 cs	ljmp	*(%bp)

	.align	4
.globl sipi_params_16bit
sipi_params_16bit:
	/* 48-bit far pointer */
ap_start_jmp:
	.long	0		/* offset set to ap_start by U-Boot */
	.word	CODE_SEG	/* segment */

	.word	0		/* padding */
gdtaddr:
	.word	0	/* limit */
	.long	0	/* table */
	.word	0	/* unused */

.globl ap_start16_code_end
ap_start16_code_end:

/*
 * Set up the special 'fs' segment for global_data, then continue with
 * setting up the AP.
 */
.globl ap_start
ap_start:
	.code32
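	/* Load flat 32-bit data segments from U-Boot's GDT */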
	movw	$DATA_SEG, %ax
	movw	%ax, %ds
	movw	%ax, %es
	movw	%ax, %ss
	movw	%ax, %gs

	movw	$(X86_GDT_ENTRY_32BIT_FS * X86_GDT_ENTRY_SIZE), %ax
	movw	%ax, %fs

	/* Load the Interrupt descriptor table */
	mov	idt_ptr, %ebx
	lidt	(%ebx)

	/* Obtain cpu number */
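	/*
	 * ap_count holds the number of APs started so far; each AP
	 * atomically increments it with lock cmpxchg and uses the new value
	 * as its CPU number, retrying if another AP raced it
	 */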
	movl	ap_count, %eax
1:
	movl	%eax, %ecx
	inc	%ecx
	lock cmpxchg %ecx, ap_count
	jnz	1b

	/* Set up stacks for each CPU */
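	/*
	 * Give each AP its own slice of the stack region: the stack pointer
	 * for CPU number n is set to stack_top - n * stack_size
	 */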
	movl	stack_size, %eax
	mul	%ecx
	movl	stack_top, %edx
	subl	%eax, %edx
	mov	%edx, %esp
	/* Save cpu number */
	mov	%ecx, %esi

	/* Determine if one should check microcode versions */
	mov	microcode_ptr, %edi
	test	%edi, %edi
	jz	microcode_done	/* Bypass if no microcode exists */

	/* Get the Microcode version */
	mov	$1, %eax
	cpuid
	mov	$MSR_IA32_UCODE_REV, %ecx
	rdmsr
	/* If something already loaded skip loading again */
	test	%edx, %edx
	jnz	microcode_done

	/* Determine if parallel microcode loading is allowed */
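	/*
	 * A value of ~0 (all ones) in microcode_lock means every AP may load
	 * microcode in parallel; any other value means loading is serialised
	 * through the spinlock below
	 */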
	cmpl	$0xffffffff, microcode_lock
	je	load_microcode

	/* Protect microcode loading */
lock_microcode:
	lock btsl $0, microcode_lock
	jc	lock_microcode

load_microcode:
	/* Load new microcode */
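	/*
	 * Writing the linear address of the update data to
	 * MSR_IA32_UCODE_WRITE (the BIOS update trigger MSR) loads the
	 * microcode; the address is taken from %edx:%eax, so %edx is cleared
	 */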
	mov	$MSR_IA32_UCODE_WRITE, %ecx
	xor	%edx, %edx
	mov	%edi, %eax
	/*
	 * The microcode pointer is passed in pointing to the header. Adjust
	 * pointer to reflect the payload (header size is 48 bytes)
	 */
	add	$UCODE_HEADER_LEN, %eax
	pusha
	wrmsr
	popa

	/* Unconditionally unlock microcode loading */
	cmpl	$0xffffffff, microcode_lock
	je	microcode_done

	xor	%eax, %eax
	mov	%eax, microcode_lock

microcode_done:
	/*
	 * Load MSRs. Each entry in the table consists of:
	 * 0: index,
	 * 4: value[31:0]
	 * 8: value[63:32]
	 * See struct saved_msr in mp_init.c.
	 */
	mov	msr_table_ptr, %edi
	mov	msr_count, %ebx
	test	%ebx, %ebx
	jz	1f
load_msr:
	mov	(%edi), %ecx
	mov	4(%edi), %eax
	mov	8(%edi), %edx
	wrmsr
	add	$12, %edi
	dec	%ebx
	jnz	load_msr

1:
	/* Enable caching */
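	/*
	 * The 16-bit entry code set CD and NW while switching to protected
	 * mode; clear them here so the AP runs with its cache enabled
	 */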
	mov	%cr0, %eax
	andl	$(~(X86_CR0_CD | X86_CR0_NW)), %eax
	mov	%eax, %cr0

	/* c_handler(cpu_num) */
	movl	%esi, %eax	/* cpu_num */
	mov	c_handler, %esi
	call	*%esi

	/* This matches struct sipi_params */
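	/*
	 * These fields are filled in by the MP init code in mp_init.c before
	 * the SIPI is sent, and are read by the code above once each AP
	 * starts running
	 */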
	.align	4
.globl	sipi_params
sipi_params:
idt_ptr:
	.long 0
stack_top:
	.long 0
stack_size:
	.long 0
microcode_lock:
	.long 0
microcode_ptr:
	.long 0
msr_table_ptr:
	.long 0
msr_count:
	.long 0
c_handler:
	.long 0
ap_count:
	.long 0