blob: fbede1b1a72b2dfcf5bfb20518fa42114d577e47 [file] [log] [blame]
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +03001/*
2 * Copyright (C) 2018 Marvell International Ltd.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 * https://spdx.org/licenses
6 */
7
8#include <assert.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +00009
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030010#include <platform_def.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000011
12#include <arch_helpers.h>
13#include <common/debug.h>
14#include <drivers/delay_timer.h>
Grzegorz Jaszczyk7588ae22019-04-17 11:24:43 +020015#include <mg_conf_cm3/mg_conf_cm3.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000016#include <lib/mmio.h>
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030017
18#include <plat_pm_trace.h>
19#include <mss_scp_bootloader.h>
20#include <mss_ipc_drv.h>
21#include <mss_mem.h>
Konstantin Porotchkinb3d4bd52021-02-28 16:12:56 +020022#include <mss_defs.h>
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030023#include <mss_scp_bl2_format.h>
24
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030025#define MSS_DMA_TIMEOUT 1000
26#define MSS_EXTERNAL_SPACE 0x50000000
27#define MSS_EXTERNAL_ADDR_MASK 0xfffffff
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +020028#define MSS_INTERNAL_SPACE 0x40000000
29#define MSS_INTERNAL_ADDR_MASK 0x00ffffff
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030030
31#define DMA_SIZE 128
32
33#define MSS_HANDSHAKE_TIMEOUT 50
34
35static int mss_check_image_ready(volatile struct mss_pm_ctrl_block *mss_pm_crtl)
36{
37 int timeout = MSS_HANDSHAKE_TIMEOUT;
38
39 /* Wait for SCP to signal it's ready */
40 while ((mss_pm_crtl->handshake != MSS_ACKNOWLEDGMENT) &&
41 (timeout-- > 0))
42 mdelay(1);
43
44 if (mss_pm_crtl->handshake != MSS_ACKNOWLEDGMENT)
45 return -1;
46
47 mss_pm_crtl->handshake = HOST_ACKNOWLEDGMENT;
48
49 return 0;
50}
51
/*
 * Copy "size" bytes from "src_addr" to offset 0 of the MSS CM3 IRAM using
 * the MSS DMA engine, in DMA_SIZE (128-byte) bursts.
 *
 * src_addr - source address as seen by the MSS DMA; the caller has already
 *            encoded it with MSS_EXTERNAL_SPACE or MSS_INTERNAL_SPACE
 * size     - number of bytes to transfer
 * mss_regs - base address of the target MSS register file
 *
 * Returns 0 on success, 1 if any burst is not acknowledged within
 * MSS_DMA_TIMEOUT polls (50 us apart).
 */
static int mss_iram_dma_load(uint32_t src_addr, uint32_t size,
			     uintptr_t mss_regs)
{
	uint32_t i, loop_num, timeout;

	/* load image to MSS RAM using DMA; round the burst count up so a
	 * partial trailing chunk is still transferred
	 */
	loop_num = (size / DMA_SIZE) + !!(size % DMA_SIZE);
	for (i = 0; i < loop_num; i++) {
		/* write source address */
		mmio_write_32(MSS_DMA_SRCBR(mss_regs),
			      src_addr + (i * DMA_SIZE));
		/* write destination address */
		mmio_write_32(MSS_DMA_DSTBR(mss_regs), (i * DMA_SIZE));
		/* make sure DMA data is ready before triggering it */
		dsb();
		/* set the DMA control register: request the transfer and
		 * program the burst size in one write
		 */
		mmio_write_32(MSS_DMA_CTRLR(mss_regs),
			      ((MSS_DMA_CTRLR_REQ_SET <<
				MSS_DMA_CTRLR_REQ_OFFSET) |
			      (DMA_SIZE << MSS_DMA_CTRLR_SIZE_OFFSET)));
		/* Poll DMA_ACK at MSS_DMACTLR until it is ready */
		timeout = MSS_DMA_TIMEOUT;
		while (timeout > 0U) {
			if (((mmio_read_32(MSS_DMA_CTRLR(mss_regs)) >>
			      MSS_DMA_CTRLR_ACK_OFFSET) &
			      MSS_DMA_CTRLR_ACK_MASK)
			      == MSS_DMA_CTRLR_ACK_READY) {
				break;
			}
			udelay(50);
			timeout--;
		}
		/* timeout reaches 0 only when the ACK never showed up */
		if (timeout == 0) {
			ERROR("\nMSS DMA failed (timeout)\n");
			return 1;
		}
	}
	return 0;
}
91
/*
 * Load an MSS CM3 firmware image into the CM3 ID-RAM through the MSS DMA.
 *
 * src_addr - image location in DRAM (32-bit address)
 * size     - image size in bytes; must fit in MSS_IDRAM_SIZE
 * mss_regs - base address of the target MSS register file
 * sram     - MSS SRAM base to use as an intermediate copy buffer, or 0 to
 *            let the DMA read straight from DRAM
 *
 * When sram != 0 (secure boot path, see comment below) the image is copied
 * by the CPU into SRAM in MSS_SRAM_SIZE chunks and the DMA reads each chunk
 * from there; afterwards the SRAM is wiped and the CM3 is left in reset
 * with MSS_FW_READY_MAGIC posted at the SRAM base. When sram == 0 the CM3
 * is released from reset immediately after the load.
 *
 * Returns 0 on success, non-zero on size/DMA failure.
 */
static int mss_image_load(uint32_t src_addr, uint32_t size,
			  uintptr_t mss_regs, uintptr_t sram)
{
	uint32_t chunks = 1; /* !sram case: single direct-from-DRAM pass */
	uint32_t chunk_num;
	int ret;

	/* Check if the img size is not bigger than ID-RAM size of MSS CM3 */
	if (size > MSS_IDRAM_SIZE) {
		ERROR("image is too big to fit into MSS CM3 memory\n");
		return 1;
	}

	/* The CPx MSS DMA cannot access DRAM directly in secure boot mode
	 * Copy the MSS FW image to MSS SRAM by the CPU first, then run
	 * MSS DMA for SRAM to IRAM copy
	 */
	if (sram != 0) {
		/* Round up so a partial last chunk is still transferred */
		chunks = size / MSS_SRAM_SIZE + !!(size % MSS_SRAM_SIZE);
	}

	NOTICE("%s Loading MSS FW from addr. 0x%x Size 0x%x to MSS at 0x%lx\n",
	       sram == 0 ? "" : "SECURELY", src_addr, size, mss_regs);
	for (chunk_num = 0; chunk_num < chunks; chunk_num++) {
		size_t chunk_size = size;
		uint32_t img_src = MSS_EXTERNAL_SPACE | /* no SRAM */
				   (src_addr & MSS_EXTERNAL_ADDR_MASK);

		if (sram != 0) {
			uintptr_t chunk_source =
				src_addr + MSS_SRAM_SIZE * chunk_num;

			/* Full chunks everywhere except the last one, which
			 * carries the remainder
			 */
			if (chunk_num != (size / MSS_SRAM_SIZE)) {
				chunk_size = MSS_SRAM_SIZE;
			} else {
				chunk_size = size % MSS_SRAM_SIZE;
			}

			/* size was an exact multiple of MSS_SRAM_SIZE */
			if (chunk_size == 0) {
				break;
			}

			VERBOSE("Chunk %d -> SRAM 0x%lx from 0x%lx SZ 0x%lx\n",
				chunk_num, sram, chunk_source, chunk_size);
			memcpy((void *)sram, (void *)chunk_source, chunk_size);
			/* ensure the copy is visible before the DMA reads it */
			dsb();
			/* redirect the DMA source to the internal SRAM copy */
			img_src = MSS_INTERNAL_SPACE |
				  (sram & MSS_INTERNAL_ADDR_MASK);
		}

		ret = mss_iram_dma_load(img_src, chunk_size, mss_regs);
		if (ret != 0) {
			ERROR("MSS FW chunk %d load failed\n", chunk_num);
			return ret;
		}
	}

	bl2_plat_configure_mss_windows(mss_regs);

	if (sram != 0) {
		/* Wipe the MSS SRAM after using it as copy buffer */
		memset((void *)sram, 0, MSS_SRAM_SIZE);
		NOTICE("CP MSS startup is postponed\n");
		/* FW loaded, but CPU startup postponed until final CP setup */
		mmio_write_32(sram, MSS_FW_READY_MAGIC);
		dsb();
	} else {
		/* Release M3 from reset */
		mmio_write_32(MSS_M3_RSTCR(mss_regs),
			      (MSS_M3_RSTCR_RST_OFF <<
			       MSS_M3_RSTCR_RST_OFFSET));
	}

	NOTICE("Done\n");

	return 0;
}
169
/* Load image to MSS AP and do PM related initialization
 * Note that this routine is different than other CM3 loading routines, because
 * firmware for AP is dedicated for PM and therefore some additional PM
 * initialization is required
 *
 * single_img - address of this CM3's firmware within the SCP_BL2 blob
 * image_size - firmware size in bytes
 * ap_idx     - index of the AP whose MSS receives the image
 *
 * Returns 0 on success (even if the image carries no PM firmware, which is
 * only reported), -1 when the image load itself fails.
 */
static int mss_ap_load_image(uintptr_t single_img,
			     uint32_t image_size, uint32_t ap_idx)
{
	volatile struct mss_pm_ctrl_block *mss_pm_crtl;
	int ret;

	/* TODO: add PM Control Info from platform */
	/* Populate the shared PM control block before the SCP starts,
	 * so the PM firmware finds the topology/trace settings ready.
	 */
	mss_pm_crtl = (struct mss_pm_ctrl_block *)MSS_SRAM_PM_CONTROL_BASE;
	mss_pm_crtl->ipc_version = MV_PM_FW_IPC_VERSION;
	mss_pm_crtl->num_of_clusters = PLAT_MARVELL_CLUSTER_COUNT;
	mss_pm_crtl->num_of_cores_per_cluster =
		PLAT_MARVELL_CLUSTER_CORE_COUNT;
	mss_pm_crtl->num_of_cores = PLAT_MARVELL_CLUSTER_COUNT *
				    PLAT_MARVELL_CLUSTER_CORE_COUNT;
	mss_pm_crtl->pm_trace_ctrl_base_address = AP_MSS_ATF_CORE_CTRL_BASE;
	mss_pm_crtl->pm_trace_info_base_address = AP_MSS_ATF_CORE_INFO_BASE;
	mss_pm_crtl->pm_trace_info_core_size = AP_MSS_ATF_CORE_INFO_SIZE;
	VERBOSE("MSS Control Block = 0x%x\n", MSS_SRAM_PM_CONTROL_BASE);
	VERBOSE("mss_pm_crtl->ipc_version = 0x%x\n",
		mss_pm_crtl->ipc_version);
	VERBOSE("mss_pm_crtl->num_of_cores = 0x%x\n",
		mss_pm_crtl->num_of_cores);
	VERBOSE("mss_pm_crtl->num_of_clusters = 0x%x\n",
		mss_pm_crtl->num_of_clusters);
	VERBOSE("mss_pm_crtl->num_of_cores_per_cluster = 0x%x\n",
		mss_pm_crtl->num_of_cores_per_cluster);
	VERBOSE("mss_pm_crtl->pm_trace_ctrl_base_address = 0x%x\n",
		mss_pm_crtl->pm_trace_ctrl_base_address);
	VERBOSE("mss_pm_crtl->pm_trace_info_base_address = 0x%x\n",
		mss_pm_crtl->pm_trace_info_base_address);
	VERBOSE("mss_pm_crtl->pm_trace_info_core_size = 0x%x\n",
		mss_pm_crtl->pm_trace_info_core_size);

	/* TODO: add checksum to image */
	VERBOSE("Send info about the SCP_BL2 image to be transferred to SCP\n");

	/* sram == 0: AP MSS DMA loads directly from DRAM */
	ret = mss_image_load(single_img, image_size,
			     bl2_plat_get_ap_mss_regs(ap_idx), 0);
	if (ret != 0) {
		ERROR("SCP Image load failed\n");
		return -1;
	}

	/* check that the image was loaded successfully */
	ret = mss_check_image_ready(mss_pm_crtl);
	if (ret != 0)
		NOTICE("SCP Image doesn't contain PM firmware\n");

	return 0;
}
225
/* Load CM3 image (single_img) to CM3 pointed by cm3_type
 *
 * cm3_type   - destination service CPU (AP MSS, CPx MSS or CPx MG)
 * single_img - address of this CM3's firmware within the SCP_BL2 blob
 * image_size - firmware size in bytes
 *
 * Returns 0 on success or when the image is skipped/unrecognized,
 * non-zero when a load fails.
 */
static int load_img_to_cm3(enum cm3_t cm3_type,
			   uintptr_t single_img, uint32_t image_size)
{
	int ret, ap_idx, cp_index;
	uint32_t ap_count = bl2_plat_get_ap_count();

	switch (cm3_type) {
	case MSS_AP:
		/* AP PM firmware goes to every AP in the system */
		for (ap_idx = 0; ap_idx < ap_count; ap_idx++) {
			NOTICE("Load image to AP%d MSS\n", ap_idx);
			ret = mss_ap_load_image(single_img, image_size, ap_idx);
			if (ret != 0)
				return ret;
		}
		break;
	case MSS_CP0:
	case MSS_CP1:
	case MSS_CP2:
	case MSS_CP3:
		/* MSS_AP = 0
		 * MSS_CP0 = 1
		 * .
		 * .
		 * MSS_CP3 = 4
		 * Actual CP index is MSS_CPX - 1
		 */
		cp_index = cm3_type - 1;
		for (ap_idx = 0; ap_idx < ap_count; ap_idx++) {
			/* Check if we should load this image
			 * according to number of CPs
			 */
			if (bl2_plat_get_cp_count(ap_idx) <= cp_index) {
				NOTICE("Skipping MSS CP%d related image\n",
				       cp_index);
				break;
			}

			NOTICE("Load image to CP%d MSS AP%d\n",
			       cp_index, ap_idx);
			/* CP MSS loads go through the CP SRAM copy buffer */
			ret = mss_image_load(single_img, image_size,
					     bl2_plat_get_cp_mss_regs(
						     ap_idx, cp_index),
					     bl2_plat_get_cp_mss_sram(
						     ap_idx, cp_index));
			if (ret != 0) {
				ERROR("SCP Image load failed\n");
				return -1;
			}
		}
		break;
	case MG_CP0:
	case MG_CP1:
	case MG_CP2:
		cp_index = cm3_type - MG_CP0;
		/* MG images are checked against AP0's CP count only */
		if (bl2_plat_get_cp_count(0) <= cp_index) {
			NOTICE("Skipping MG CP%d related image\n",
			       cp_index);
			break;
		}
		NOTICE("Load image to CP%d MG\n", cp_index);
		ret = mg_image_load(single_img, image_size, cp_index);
		if (ret != 0) {
			ERROR("SCP Image load failed\n");
			return -1;
		}
		break;
	default:
		/* Unknown image type is reported but deliberately not fatal */
		ERROR("SCP_BL2 wrong img format (cm3_type=%d)\n", cm3_type);
		break;
	}

	return 0;
}
300
301/* The Armada 8K has 5 service CPUs and Armada 7K has 3. Therefore it was
302 * required to provide a method for loading firmware to all of the service CPUs.
303 * To achieve that, the scp_bl2 image in fact is file containing up to 5
304 * concatenated firmwares and this routine splits concatenated image into single
305 * images dedicated for appropriate service CPU and then load them.
306 */
307static int split_and_load_bl2_image(void *image)
308{
309 file_header_t *file_hdr;
310 img_header_t *img_hdr;
311 uintptr_t single_img;
312 int i;
313
314 file_hdr = (file_header_t *)image;
315
316 if (file_hdr->magic != FILE_MAGIC) {
317 ERROR("SCP_BL2 wrong img format\n");
318 return -1;
319 }
320
321 if (file_hdr->nr_of_imgs > MAX_NR_OF_FILES) {
Grzegorz Jaszczykf618f182019-04-04 14:38:55 +0200322 ERROR("SCP_BL2 concatenated image contains too many images\n");
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +0300323 return -1;
324 }
325
326 img_hdr = (img_header_t *)((uintptr_t)image + sizeof(file_header_t));
327 single_img = (uintptr_t)image + sizeof(file_header_t) +
328 sizeof(img_header_t) * file_hdr->nr_of_imgs;
329
330 NOTICE("SCP_BL2 contains %d concatenated images\n",
331 file_hdr->nr_of_imgs);
332 for (i = 0; i < file_hdr->nr_of_imgs; i++) {
333
334 /* Before loading make sanity check on header */
335 if (img_hdr->version != HEADER_VERSION) {
336 ERROR("Wrong header, img corrupted exiting\n");
337 return -1;
338 }
339
340 load_img_to_cm3(img_hdr->type, single_img, img_hdr->length);
341
342 /* Prepare offsets for next run */
343 single_img += img_hdr->length;
344 img_hdr++;
345 }
346
347 return 0;
348}
349
/*
 * Entry point for transferring the SCP_BL2 concatenated firmware blob to
 * the service CPUs. Validates the size, then delegates the split/load work.
 * Returns 0 on success, -1 on failure.
 */
int scp_bootloader_transfer(void *image, unsigned int image_size)
{
#ifdef SCP_BL2_BASE
	/* The blob must have been loaded at its expected base */
	assert((uintptr_t) image == SCP_BL2_BASE);
#endif

	VERBOSE("Concatenated img size %d\n", image_size);

	if (image_size == 0) {
		ERROR("SCP_BL2 image size can't be 0 (current size = 0x%x)\n",
		      image_size);
		return -1;
	}

	return (split_and_load_bl2_image(image) != 0) ? -1 : 0;
}