blob: f669a777bed384d94bd899297f62c4d67c8c1248 [file] [log] [blame]
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +03001/*
2 * Copyright (C) 2018 Marvell International Ltd.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 * https://spdx.org/licenses
6 */
7
8#include <assert.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +00009
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030010#include <platform_def.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000011
12#include <arch_helpers.h>
13#include <common/debug.h>
14#include <drivers/delay_timer.h>
Grzegorz Jaszczyk7588ae22019-04-17 11:24:43 +020015#include <mg_conf_cm3/mg_conf_cm3.h>
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000016#include <lib/mmio.h>
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030017
18#include <plat_pm_trace.h>
19#include <mss_scp_bootloader.h>
20#include <mss_ipc_drv.h>
21#include <mss_mem.h>
22#include <mss_scp_bl2_format.h>
23
/* MSS DMA engine register offsets, relative to the MSS register base
 * (used below via mmio_read_32/mmio_write_32 on MSS_DMA_* addresses)
 */
#define MSS_DMA_SRCBR(base)		(base + 0xC0)	/* DMA source address */
#define MSS_DMA_DSTBR(base)		(base + 0xC4)	/* DMA destination address */
#define MSS_DMA_CTRLR(base)		(base + 0xC8)	/* DMA control/status */
#define MSS_M3_RSTCR(base)		(base + 0xFC)	/* CM3 reset control */

/* MSS_DMA_CTRLR bit-field layout (per usage in mss_iram_dma_load) */
#define MSS_DMA_CTRLR_SIZE_OFFSET	(0)	/* transfer size field */
#define MSS_DMA_CTRLR_REQ_OFFSET	(15)	/* request (trigger) bit */
#define MSS_DMA_CTRLR_REQ_SET		(1)
#define MSS_DMA_CTRLR_ACK_OFFSET	(12)	/* completion ack bit */
#define MSS_DMA_CTRLR_ACK_MASK		(0x1)
#define MSS_DMA_CTRLR_ACK_READY		(1)
#define MSS_M3_RSTCR_RST_OFFSET		(0)
#define MSS_M3_RSTCR_RST_OFF		(1)	/* 1 = take CM3 out of reset */

#define MSS_DMA_TIMEOUT			1000	/* poll iterations, 50us apart */
/* Address-space windows as seen by the MSS DMA engine:
 * external (DRAM) vs. internal (SRAM) source encodings for img_src
 */
#define MSS_EXTERNAL_SPACE		0x50000000
#define MSS_EXTERNAL_ADDR_MASK		0xfffffff
#define MSS_INTERNAL_SPACE		0x40000000
#define MSS_INTERNAL_ADDR_MASK		0x00ffffff
47
48static int mss_check_image_ready(volatile struct mss_pm_ctrl_block *mss_pm_crtl)
49{
50 int timeout = MSS_HANDSHAKE_TIMEOUT;
51
52 /* Wait for SCP to signal it's ready */
53 while ((mss_pm_crtl->handshake != MSS_ACKNOWLEDGMENT) &&
54 (timeout-- > 0))
55 mdelay(1);
56
57 if (mss_pm_crtl->handshake != MSS_ACKNOWLEDGMENT)
58 return -1;
59
60 mss_pm_crtl->handshake = HOST_ACKNOWLEDGMENT;
61
62 return 0;
63}
64
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +020065static int mss_iram_dma_load(uint32_t src_addr, uint32_t size,
66 uintptr_t mss_regs)
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030067{
68 uint32_t i, loop_num, timeout;
69
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030070 /* load image to MSS RAM using DMA */
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +020071 loop_num = (size / DMA_SIZE) + !!(size % DMA_SIZE);
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030072 for (i = 0; i < loop_num; i++) {
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +020073 /* write source address */
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030074 mmio_write_32(MSS_DMA_SRCBR(mss_regs),
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +020075 src_addr + (i * DMA_SIZE));
76 /* write destination address */
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030077 mmio_write_32(MSS_DMA_DSTBR(mss_regs), (i * DMA_SIZE));
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +020078 /* make sure DMA data is ready before triggering it */
79 dsb();
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030080 /* set the DMA control register */
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +020081 mmio_write_32(MSS_DMA_CTRLR(mss_regs),
82 ((MSS_DMA_CTRLR_REQ_SET <<
83 MSS_DMA_CTRLR_REQ_OFFSET) |
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030084 (DMA_SIZE << MSS_DMA_CTRLR_SIZE_OFFSET)));
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030085 /* Poll DMA_ACK at MSS_DMACTLR until it is ready */
86 timeout = MSS_DMA_TIMEOUT;
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +020087 while (timeout > 0U) {
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030088 if ((mmio_read_32(MSS_DMA_CTRLR(mss_regs)) >>
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +020089 (MSS_DMA_CTRLR_ACK_OFFSET &
90 MSS_DMA_CTRLR_ACK_MASK))
91 == MSS_DMA_CTRLR_ACK_READY) {
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030092 break;
93 }
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030094 udelay(50);
95 timeout--;
96 }
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030097 if (timeout == 0) {
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +020098 ERROR("\nMSS DMA failed (timeout)\n");
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +030099 return 1;
100 }
101 }
Konstantin Porotchkin276fd8e2020-01-29 16:02:46 +0200102 return 0;
103}
104
/*
 * Load a firmware image into the MSS CM3 IRAM and release the CM3 from
 * reset.
 *
 * src_addr - image location in DRAM (32-bit address)
 * size     - image size in bytes; must fit in MSS_IDRAM_SIZE
 * mss_regs - MSS register base used for DMA and reset control
 * sram     - if non-zero, an MSS SRAM bounce buffer: the image is first
 *            memcpy'd there in MSS_SRAM_SIZE chunks and DMA'd from SRAM
 *            (the CPx MSS DMA cannot read DRAM in secure boot mode);
 *            if zero, the DMA reads DRAM directly in one pass.
 *
 * Returns 0 on success, non-zero on oversize image or DMA failure.
 */
static int mss_image_load(uint32_t src_addr, uint32_t size,
			  uintptr_t mss_regs, uintptr_t sram)
{
	uint32_t chunks = 1; /* !sram case */
	uint32_t chunk_num;
	int ret;

	/* Check if the img size is not bigger than ID-RAM size of MSS CM3 */
	if (size > MSS_IDRAM_SIZE) {
		ERROR("image is too big to fit into MSS CM3 memory\n");
		return 1;
	}

	/* The CPx MSS DMA cannot access DRAM directly in secure boot mode
	 * Copy the MSS FW image to MSS SRAM by the CPU first, then run
	 * MSS DMA for SRAM to IRAM copy
	 */
	if (sram != 0) {
		/* round the chunk count up for a partial last chunk */
		chunks = size / MSS_SRAM_SIZE + !!(size % MSS_SRAM_SIZE);
	}

	NOTICE("%s Loading MSS FW from addr. 0x%x Size 0x%x to MSS at 0x%lx\n",
	       sram == 0 ? "" : "SECURELY", src_addr, size, mss_regs);
	for (chunk_num = 0; chunk_num < chunks; chunk_num++) {
		size_t chunk_size = size;
		/* default: DMA reads straight from DRAM (no SRAM bounce) */
		uint32_t img_src = MSS_EXTERNAL_SPACE | /* no SRAM */
				   (src_addr & MSS_EXTERNAL_ADDR_MASK);

		if (sram != 0) {
			uintptr_t chunk_source =
				src_addr + MSS_SRAM_SIZE * chunk_num;

			/* last chunk carries the remainder of the image */
			if (chunk_num != (size / MSS_SRAM_SIZE)) {
				chunk_size = MSS_SRAM_SIZE;
			} else {
				chunk_size = size % MSS_SRAM_SIZE;
			}

			/* size was an exact multiple - nothing left to copy */
			if (chunk_size == 0) {
				break;
			}

			VERBOSE("Chunk %d -> SRAM 0x%lx from 0x%lx SZ 0x%lx\n",
				chunk_num, sram, chunk_source, chunk_size);
			memcpy((void *)sram, (void *)chunk_source, chunk_size);
			/* ensure the copy is visible before the DMA kicks */
			dsb();
			img_src = MSS_INTERNAL_SPACE |
				  (sram & MSS_INTERNAL_ADDR_MASK);
		}

		ret = mss_iram_dma_load(img_src, chunk_size, mss_regs);
		if (ret != 0) {
			ERROR("MSS FW chunk %d load failed\n", chunk_num);
			return ret;
		}
	}

	bl2_plat_configure_mss_windows(mss_regs);

	/* Wipe the MSS SRAM after using it as copy buffer */
	if (sram) {
		memset((void *)sram, 0, MSS_SRAM_SIZE);
	}

	/* Release M3 from reset */
	mmio_write_32(MSS_M3_RSTCR(mss_regs),
		      (MSS_M3_RSTCR_RST_OFF << MSS_M3_RSTCR_RST_OFFSET));

	NOTICE("Done\n");

	return 0;
}
177
/* Load image to MSS AP and do PM related initialization
 * Note that this routine is different than other CM3 loading routines, because
 * firmware for AP is dedicated for PM and therefore some additional PM
 * initialization is required
 *
 * single_img - image location in DRAM
 * image_size - image size in bytes
 * ap_idx     - AP index used to resolve the MSS register base
 *
 * Returns -1 on load failure; 0 otherwise (a failed PM handshake is only
 * reported via NOTICE, not treated as an error).
 */
static int mss_ap_load_image(uintptr_t single_img,
			     uint32_t image_size, uint32_t ap_idx)
{
	volatile struct mss_pm_ctrl_block *mss_pm_crtl;
	int ret;

	/* TODO: add PM Control Info from platform */
	/* Populate the PM control block in shared SRAM before the CM3 is
	 * released from reset, so the PM firmware finds the topology and
	 * trace configuration ready at boot
	 */
	mss_pm_crtl = (struct mss_pm_ctrl_block *)MSS_SRAM_PM_CONTROL_BASE;
	mss_pm_crtl->ipc_version = MV_PM_FW_IPC_VERSION;
	mss_pm_crtl->num_of_clusters = PLAT_MARVELL_CLUSTER_COUNT;
	mss_pm_crtl->num_of_cores_per_cluster =
		PLAT_MARVELL_CLUSTER_CORE_COUNT;
	mss_pm_crtl->num_of_cores = PLAT_MARVELL_CLUSTER_COUNT *
				    PLAT_MARVELL_CLUSTER_CORE_COUNT;
	mss_pm_crtl->pm_trace_ctrl_base_address = AP_MSS_ATF_CORE_CTRL_BASE;
	mss_pm_crtl->pm_trace_info_base_address = AP_MSS_ATF_CORE_INFO_BASE;
	mss_pm_crtl->pm_trace_info_core_size = AP_MSS_ATF_CORE_INFO_SIZE;
	VERBOSE("MSS Control Block = 0x%x\n", MSS_SRAM_PM_CONTROL_BASE);
	VERBOSE("mss_pm_crtl->ipc_version = 0x%x\n",
		mss_pm_crtl->ipc_version);
	VERBOSE("mss_pm_crtl->num_of_cores = 0x%x\n",
		mss_pm_crtl->num_of_cores);
	VERBOSE("mss_pm_crtl->num_of_clusters = 0x%x\n",
		mss_pm_crtl->num_of_clusters);
	VERBOSE("mss_pm_crtl->num_of_cores_per_cluster = 0x%x\n",
		mss_pm_crtl->num_of_cores_per_cluster);
	VERBOSE("mss_pm_crtl->pm_trace_ctrl_base_address = 0x%x\n",
		mss_pm_crtl->pm_trace_ctrl_base_address);
	VERBOSE("mss_pm_crtl->pm_trace_info_base_address = 0x%x\n",
		mss_pm_crtl->pm_trace_info_base_address);
	VERBOSE("mss_pm_crtl->pm_trace_info_core_size = 0x%x\n",
		mss_pm_crtl->pm_trace_info_core_size);

	/* TODO: add checksum to image */
	VERBOSE("Send info about the SCP_BL2 image to be transferred to SCP\n");

	/* AP MSS has no SRAM bounce buffer - load with sram == 0 */
	ret = mss_image_load(single_img, image_size,
			     bl2_plat_get_ap_mss_regs(ap_idx), 0);
	if (ret != 0) {
		ERROR("SCP Image load failed\n");
		return -1;
	}

	/* check that the image was loaded successfully */
	ret = mss_check_image_ready(mss_pm_crtl);
	if (ret != 0)
		NOTICE("SCP Image doesn't contain PM firmware\n");

	return 0;
}
233
/* Load CM3 image (single_img) to CM3 pointed by cm3_type
 *
 * For MSS_AP the image goes to every AP; for MSS_CPx / MG_CPx the image is
 * loaded only when the target CP actually exists on the platform.
 *
 * Returns non-zero on a load failure.
 * NOTE(review): an unknown cm3_type only logs an ERROR and still returns 0
 * - presumably deliberate "skip unknown image" behavior; confirm.
 */
static int load_img_to_cm3(enum cm3_t cm3_type,
			   uintptr_t single_img, uint32_t image_size)
{
	int ret, ap_idx, cp_index;
	uint32_t ap_count = bl2_plat_get_ap_count();

	switch (cm3_type) {
	case MSS_AP:
		/* PM firmware: one copy per AP die */
		for (ap_idx = 0; ap_idx < ap_count; ap_idx++) {
			NOTICE("Load image to AP%d MSS\n", ap_idx);
			ret = mss_ap_load_image(single_img, image_size, ap_idx);
			if (ret != 0)
				return ret;
		}
		break;
	case MSS_CP0:
	case MSS_CP1:
	case MSS_CP2:
	case MSS_CP3:
		/* MSS_AP = 0
		 * MSS_CP1 = 1
		 * .
		 * .
		 * MSS_CP3 = 4
		 * Actual CP index is MSS_CPX - 1
		 */
		cp_index = cm3_type - 1;
		for (ap_idx = 0; ap_idx < ap_count; ap_idx++) {
			/* Check if we should load this image
			 * according to number of CPs
			 */
			if (bl2_plat_get_cp_count(ap_idx) <= cp_index) {
				NOTICE("Skipping MSS CP%d related image\n",
				       cp_index);
				break;
			}

			NOTICE("Load image to CP%d MSS AP%d\n",
			       cp_index, ap_idx);
			/* CP MSS loads go through the SRAM bounce buffer */
			ret = mss_image_load(single_img, image_size,
					     bl2_plat_get_cp_mss_regs(
						     ap_idx, cp_index),
					     bl2_plat_get_cp_mss_sram(
						     ap_idx, cp_index));
			if (ret != 0) {
				ERROR("SCP Image load failed\n");
				return -1;
			}
		}
		break;
	case MG_CP0:
	case MG_CP1:
	case MG_CP2:
		/* MG (management CM3) images are indexed from MG_CP0;
		 * NOTE(review): only AP0's CP count is checked here,
		 * unlike the per-AP loop above - confirm intended.
		 */
		cp_index = cm3_type - MG_CP0;
		if (bl2_plat_get_cp_count(0) <= cp_index) {
			NOTICE("Skipping MG CP%d related image\n",
			       cp_index);
			break;
		}
		NOTICE("Load image to CP%d MG\n", cp_index);
		ret = mg_image_load(single_img, image_size, cp_index);
		if (ret != 0) {
			ERROR("SCP Image load failed\n");
			return -1;
		}
		break;
	default:
		ERROR("SCP_BL2 wrong img format (cm3_type=%d)\n", cm3_type);
		break;
	}

	return 0;
}
308
309/* The Armada 8K has 5 service CPUs and Armada 7K has 3. Therefore it was
310 * required to provide a method for loading firmware to all of the service CPUs.
311 * To achieve that, the scp_bl2 image in fact is file containing up to 5
312 * concatenated firmwares and this routine splits concatenated image into single
313 * images dedicated for appropriate service CPU and then load them.
314 */
315static int split_and_load_bl2_image(void *image)
316{
317 file_header_t *file_hdr;
318 img_header_t *img_hdr;
319 uintptr_t single_img;
320 int i;
321
322 file_hdr = (file_header_t *)image;
323
324 if (file_hdr->magic != FILE_MAGIC) {
325 ERROR("SCP_BL2 wrong img format\n");
326 return -1;
327 }
328
329 if (file_hdr->nr_of_imgs > MAX_NR_OF_FILES) {
Grzegorz Jaszczykf618f182019-04-04 14:38:55 +0200330 ERROR("SCP_BL2 concatenated image contains too many images\n");
Konstantin Porotchkinf69ec582018-06-07 18:31:14 +0300331 return -1;
332 }
333
334 img_hdr = (img_header_t *)((uintptr_t)image + sizeof(file_header_t));
335 single_img = (uintptr_t)image + sizeof(file_header_t) +
336 sizeof(img_header_t) * file_hdr->nr_of_imgs;
337
338 NOTICE("SCP_BL2 contains %d concatenated images\n",
339 file_hdr->nr_of_imgs);
340 for (i = 0; i < file_hdr->nr_of_imgs; i++) {
341
342 /* Before loading make sanity check on header */
343 if (img_hdr->version != HEADER_VERSION) {
344 ERROR("Wrong header, img corrupted exiting\n");
345 return -1;
346 }
347
348 load_img_to_cm3(img_hdr->type, single_img, img_hdr->length);
349
350 /* Prepare offsets for next run */
351 single_img += img_hdr->length;
352 img_hdr++;
353 }
354
355 return 0;
356}
357
/*
 * Entry point for transferring the SCP_BL2 blob to the service CPUs.
 * Validates the image size, then delegates the split-and-load work.
 * Returns 0 on success, -1 on any failure.
 */
int scp_bootloader_transfer(void *image, unsigned int image_size)
{
#ifdef SCP_BL2_BASE
	/* The blob must sit at its designated load address */
	assert((uintptr_t)image == SCP_BL2_BASE);
#endif

	VERBOSE("Concatenated img size %d\n", image_size);

	/* An empty blob cannot contain any firmware */
	if (image_size == 0) {
		ERROR("SCP_BL2 image size can't be 0 (current size = 0x%x)\n",
		      image_size);
		return -1;
	}

	return (split_and_load_bl2_image(image) != 0) ? -1 : 0;
}