/*
 * Copyright (c) 2013, ARM Limited. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <assert.h>
#include <arch_helpers.h>
#include <platform.h>
#include <bl_common.h>
/* Included only for error codes */
#include <psci.h>

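/*
 * Per-CPU stacks for the trusted firmware, placed in their own linker
 * section. It is assumed that the early boot code hands each CPU its own
 * PLATFORM_STACK_SIZE slice of this cacheline-aligned array before any C
 * code runs.
 */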
unsigned char platform_normal_stacks[PLATFORM_STACK_SIZE][PLATFORM_CORE_COUNT]
__attribute__ ((aligned(PLATFORM_CACHE_LINE_SIZE),
		section("tzfw_normal_stacks")));

/*******************************************************************************
 * This array holds the characteristics of the differences between the three
 * FVP platforms (Base, A53_A57 & Foundation). It is populated during cold boot
 * at each boot stage by the primary CPU, before the MMU is enabled (to allow
 * CCI configuration), and used thereafter. Each BL stage has its own copy to
 * allow independent operation.
 ******************************************************************************/
static unsigned long platform_config[CONFIG_LIMIT];

/*******************************************************************************
 * TODO: Check page table alignment to avoid space wastage
 ******************************************************************************/

/*******************************************************************************
 * Level 1 translation tables need 4 entries for the 4GB address space
 * accessible by the secure firmware. The input address space will be
 * restricted using the T0SZ settings in the TCR.
 ******************************************************************************/
static unsigned long l1_xlation_table[ADDR_SPACE_SIZE >> 30]
__attribute__ ((aligned((ADDR_SPACE_SIZE >> 30) << 3)));
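/*
 * Note: the alignment above equals the table size in bytes (number of entries
 * times 8), which is what the architecture expects of a translation table
 * whose base address is programmed into TTBR0.
 */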

/*******************************************************************************
 * Level 2 translation tables describe the first & second GB of the address
 * space needed to address secure peripherals e.g. trusted ROM and RAM.
 ******************************************************************************/
static unsigned long l2_xlation_table[NUM_L2_PAGETABLES][NUM_2MB_IN_GB]
__attribute__ ((aligned(NUM_2MB_IN_GB << 3)));

/*******************************************************************************
 * Level 3 translation tables (2 sets) describe the trusted & non-trusted RAM
 * regions at a granularity of 4K.
 ******************************************************************************/
static unsigned long l3_xlation_table[NUM_L3_PAGETABLES][NUM_4K_IN_2MB]
__attribute__ ((aligned(NUM_4K_IN_2MB << 3)));

/*******************************************************************************
 * Helper to create a level 1/2 table descriptor which points to a level 2/3
 * table.
 ******************************************************************************/
static unsigned long create_table_desc(unsigned long *next_table_ptr)
{
	unsigned long desc = (unsigned long) next_table_ptr;

	/* Clear the last 12 bits */
	desc >>= FOUR_KB_SHIFT;
	desc <<= FOUR_KB_SHIFT;

	desc |= TABLE_DESC;

	return desc;
}

/*******************************************************************************
 * Helper to create a level 1/2/3 block descriptor which maps the va to addr
 ******************************************************************************/
static unsigned long create_block_desc(unsigned long desc,
				       unsigned long addr,
				       unsigned int level)
{
	switch (level) {
	case LEVEL1:
		desc |= (addr << FIRST_LEVEL_DESC_N) | BLOCK_DESC;
		break;
	case LEVEL2:
		desc |= (addr << SECOND_LEVEL_DESC_N) | BLOCK_DESC;
		break;
	case LEVEL3:
		desc |= (addr << THIRD_LEVEL_DESC_N) | TABLE_DESC;
		break;
	default:
		assert(0);
	}

	return desc;
}
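
/*
 * For reference: the 'addr' argument to create_block_desc() is a table index
 * rather than a byte address, so shifting it by the per-level descriptor
 * shift reconstructs the output address. E.g. assuming SECOND_LEVEL_DESC_N
 * is the 2MB shift (21), index 5 at LEVEL2 describes the block at 0xA00000.
 */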

/*******************************************************************************
 * Helper to create a level 1/2/3 block descriptor which maps the va to output_
 * addr with Device nGnRE attributes.
 ******************************************************************************/
static unsigned long create_device_block(unsigned long output_addr,
					 unsigned int level,
					 unsigned int ns)
{
	unsigned long upper_attrs, lower_attrs, desc;

	lower_attrs = LOWER_ATTRS(ACCESS_FLAG | OSH | AP_RW);
	lower_attrs |= LOWER_ATTRS(ns | ATTR_DEVICE_INDEX);
	upper_attrs = UPPER_ATTRS(XN);
	desc = upper_attrs | lower_attrs;

	return create_block_desc(desc, output_addr, level);
}

/*******************************************************************************
 * Helper to create a level 1/2/3 block descriptor which maps the va to output_
 * addr with inner-shareable normal wbwa read-only memory attributes.
 ******************************************************************************/
static unsigned long create_romem_block(unsigned long output_addr,
					unsigned int level,
					unsigned int ns)
{
	unsigned long upper_attrs, lower_attrs, desc;

	lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RO);
	lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX);
	upper_attrs = UPPER_ATTRS(0ull);
	desc = upper_attrs | lower_attrs;

	return create_block_desc(desc, output_addr, level);
}

/*******************************************************************************
 * Helper to create a level 1/2/3 block descriptor which maps the va to output_
 * addr with inner-shareable normal wbwa read-write memory attributes.
 ******************************************************************************/
static unsigned long create_rwmem_block(unsigned long output_addr,
					unsigned int level,
					unsigned int ns)
{
	unsigned long upper_attrs, lower_attrs, desc;

	lower_attrs = LOWER_ATTRS(ACCESS_FLAG | ISH | AP_RW);
	lower_attrs |= LOWER_ATTRS(ns | ATTR_IWBWA_OWBWA_NTR_INDEX);
	upper_attrs = UPPER_ATTRS(XN);
	desc = upper_attrs | lower_attrs;

	return create_block_desc(desc, output_addr, level);
}
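
/*
 * Taken together: device and read-write normal memory mappings are created
 * execute-never (UPPER_ATTRS(XN)) while read-only normal memory is left
 * executable. With SCTLR.WXN also set in enable_mmu() below, no mapping ends
 * up both writeable and executable.
 */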

/*******************************************************************************
 * Create page tables as per the platform memory map. Certain aspects of page
 * table creation have been abstracted in the above routines. This can be
 * improved further.
 * TODO: Move the page table setup helpers into the arch or lib directory
 ******************************************************************************/
static unsigned long fill_xlation_tables(meminfo *tzram_layout,
					 unsigned long ro_start,
					 unsigned long ro_limit,
					 unsigned long coh_start,
					 unsigned long coh_limit)
{
	unsigned long l2_desc, l3_desc;
	unsigned long *xt_addr = 0, *pt_addr, off = 0;
	unsigned long trom_start_index, trom_end_index;
	unsigned long tzram_start_index, tzram_end_index;
	unsigned long flash0_start_index, flash0_end_index;
	unsigned long flash1_start_index, flash1_end_index;
	unsigned long vram_start_index, vram_end_index;
	unsigned long nsram_start_index, nsram_end_index;
	unsigned long tdram_start_index, tdram_end_index;
	unsigned long dram_start_index, dram_end_index;
	unsigned long dev0_start_index, dev0_end_index;
	unsigned long dev1_start_index, dev1_end_index;
	unsigned int idx;


	/*****************************************************************
	 * LEVEL1 PAGETABLE SETUP
	 *
	 * Find the start and end indices of the memory & peripherals in
	 * the first level pagetables. These are the main areas we care
	 * about. Also bump the end index by one if it's equal to the
	 * start, to allow for regions which lie entirely within a GB.
	 *****************************************************************/
	trom_start_index = ONE_GB_INDEX(TZROM_BASE);
	dev0_start_index = ONE_GB_INDEX(TZRNG_BASE);
	dram_start_index = ONE_GB_INDEX(DRAM_BASE);
	dram_end_index = ONE_GB_INDEX(DRAM_BASE + DRAM_SIZE);

	if (dram_end_index == dram_start_index)
		dram_end_index++;

	/*
	 * Fill up the level1 translation table first
	 */
	for (idx = 0; idx < (ADDR_SPACE_SIZE >> 30); idx++) {

		/*
		 * Fill up the entry for the TZROM. This will cover
		 * everything in the first GB.
		 */
		if (idx == trom_start_index) {
			xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][0];
			l1_xlation_table[idx] = create_table_desc(xt_addr);
			continue;
		}

		/*
		 * Mark the second GB as device
		 */
		if (idx == dev0_start_index) {
			xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][0];
			l1_xlation_table[idx] = create_table_desc(xt_addr);
			continue;
		}

		/*
		 * Fill up the block entry for the DRAM with Normal
		 * inner-WBWA outer-WBWA non-transient attributes.
		 * This will cover 2-4GB. Note that the accesses are
		 * marked as non-secure.
		 */
		if ((idx >= dram_start_index) && (idx < dram_end_index)) {
			l1_xlation_table[idx] = create_rwmem_block(idx, LEVEL1,
								   NS);
			continue;
		}

		assert(0);
	}


	/*****************************************************************
	 * LEVEL2 PAGETABLE SETUP
	 *
	 * Find the start and end indices of the memory & peripherals in
	 * the second level pagetables.
	 *****************************************************************/

	/* Initializations for the 1st GB */
	trom_start_index = TWO_MB_INDEX(TZROM_BASE);
	trom_end_index = TWO_MB_INDEX(TZROM_BASE + TZROM_SIZE);
	if (trom_end_index == trom_start_index)
		trom_end_index++;

	tdram_start_index = TWO_MB_INDEX(TZDRAM_BASE);
	tdram_end_index = TWO_MB_INDEX(TZDRAM_BASE + TZDRAM_SIZE);
	if (tdram_end_index == tdram_start_index)
		tdram_end_index++;

	flash0_start_index = TWO_MB_INDEX(FLASH0_BASE);
	flash0_end_index = TWO_MB_INDEX(FLASH0_BASE + TZROM_SIZE);
	if (flash0_end_index == flash0_start_index)
		flash0_end_index++;

	flash1_start_index = TWO_MB_INDEX(FLASH1_BASE);
	flash1_end_index = TWO_MB_INDEX(FLASH1_BASE + FLASH1_SIZE);
	if (flash1_end_index == flash1_start_index)
		flash1_end_index++;

	vram_start_index = TWO_MB_INDEX(VRAM_BASE);
	vram_end_index = TWO_MB_INDEX(VRAM_BASE + VRAM_SIZE);
	if (vram_end_index == vram_start_index)
		vram_end_index++;

	dev0_start_index = TWO_MB_INDEX(DEVICE0_BASE);
	dev0_end_index = TWO_MB_INDEX(DEVICE0_BASE + DEVICE0_SIZE);
	if (dev0_end_index == dev0_start_index)
		dev0_end_index++;

	dev1_start_index = TWO_MB_INDEX(DEVICE1_BASE);
	dev1_end_index = TWO_MB_INDEX(DEVICE1_BASE + DEVICE1_SIZE);
	if (dev1_end_index == dev1_start_index)
		dev1_end_index++;

	/* Since the size is < 2M this is a single index */
	tzram_start_index = TWO_MB_INDEX(tzram_layout->total_base);
	nsram_start_index = TWO_MB_INDEX(NSRAM_BASE);

	/*
	 * Fill up the level2 translation table for the first GB next
	 */
	for (idx = 0; idx < NUM_2MB_IN_GB; idx++) {

		l2_desc = INVALID_DESC;
		xt_addr = &l2_xlation_table[GB1_L2_PAGETABLE][idx];

		/* Block entries for 64M of trusted Boot ROM */
		if ((idx >= trom_start_index) && (idx < trom_end_index))
			l2_desc = create_romem_block(idx, LEVEL2, 0);

		/* Single L3 page table entry for 256K of TZRAM */
		if (idx == tzram_start_index) {
			pt_addr = &l3_xlation_table[TZRAM_PAGETABLE][0];
			l2_desc = create_table_desc(pt_addr);
		}

		/* Block entries for 32M of trusted DRAM */
		if ((idx >= tdram_start_index) && (idx < tdram_end_index))
			l2_desc = create_rwmem_block(idx, LEVEL2, 0);

		/* Block entries for 64M of aliased trusted Boot ROM */
		if ((idx >= flash0_start_index) && (idx < flash0_end_index))
			l2_desc = create_romem_block(idx, LEVEL2, 0);

		/* Block entries for 64M of flash1 */
		if ((idx >= flash1_start_index) && (idx < flash1_end_index))
			l2_desc = create_romem_block(idx, LEVEL2, 0);

		/* Block entries for 32M of VRAM */
		if ((idx >= vram_start_index) && (idx < vram_end_index))
			l2_desc = create_rwmem_block(idx, LEVEL2, 0);

		/* Block entries for all the devices in the first GB */
		if ((idx >= dev0_start_index) && (idx < dev0_end_index))
			l2_desc = create_device_block(idx, LEVEL2, 0);

		/* Block entries for all the devices in the first GB */
		if ((idx >= dev1_start_index) && (idx < dev1_end_index))
			l2_desc = create_device_block(idx, LEVEL2, 0);

		/* Single L3 page table entry for 64K of NSRAM */
		if (idx == nsram_start_index) {
			pt_addr = &l3_xlation_table[NSRAM_PAGETABLE][0];
			l2_desc = create_table_desc(pt_addr);
		}

		*xt_addr = l2_desc;
	}
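
	/*
	 * Note that the checks in the loop above are not mutually exclusive:
	 * if two regions were ever to share a 2MB index, the last matching
	 * check would determine the final descriptor.
	 */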


	/*
	 * Initializations for the 2nd GB. Mark everything as device
	 * for the time being as the memory map is not final. Each
	 * index needs to be offset by 'off' to obtain absolute values.
	 */
	off = NUM_2MB_IN_GB;
	for (idx = off; idx < (NUM_2MB_IN_GB + off); idx++) {
		l2_desc = create_device_block(idx, LEVEL2, 0);
		xt_addr = &l2_xlation_table[GB2_L2_PAGETABLE][idx - off];
		*xt_addr = l2_desc;
	}


	/*****************************************************************
	 * LEVEL3 PAGETABLE SETUP
	 * The following setup assumes knowledge of the scatter file. This
	 * should be reasonable as this is platform specific code.
	 *****************************************************************/

	/* Fill up the level3 pagetable for the trusted SRAM. */
	tzram_start_index = FOUR_KB_INDEX(tzram_layout->total_base);
	tzram_end_index = FOUR_KB_INDEX(tzram_layout->total_base +
					tzram_layout->total_size);
	if (tzram_end_index == tzram_start_index)
		tzram_end_index++;

	/*
	 * Reusing trom* to mark RO memory. BLX_STACKS follows BLX_RO in the
	 * scatter file. Using BLX_RO$$Limit does not work as it might not
	 * cross the page boundary, thus leading to truncation of valid RO
	 * memory.
	 */
	trom_start_index = FOUR_KB_INDEX(ro_start);
	trom_end_index = FOUR_KB_INDEX(ro_limit);
	if (trom_end_index == trom_start_index)
		trom_end_index++;

	/*
	 * Reusing dev* to mark coherent device memory. $$Limit works here
	 * because the coherent memory section is known to be 4K in size.
	 */
	dev0_start_index = FOUR_KB_INDEX(coh_start);
	dev0_end_index = FOUR_KB_INDEX(coh_limit);
	if (dev0_end_index == dev0_start_index)
		dev0_end_index++;


	/* Each index needs to be offset by 'off' to obtain absolute values */
	off = FOUR_KB_INDEX(TZRAM_BASE);
	for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {

		l3_desc = INVALID_DESC;
		xt_addr = &l3_xlation_table[TZRAM_PAGETABLE][idx - off];

		if (idx >= tzram_start_index && idx < tzram_end_index)
			l3_desc = create_rwmem_block(idx, LEVEL3, 0);

		if (idx >= trom_start_index && idx < trom_end_index)
			l3_desc = create_romem_block(idx, LEVEL3, 0);

		if (idx >= dev0_start_index && idx < dev0_end_index)
			l3_desc = create_device_block(idx, LEVEL3, 0);

		*xt_addr = l3_desc;
	}

	/* Fill up the level3 pagetable for the non-trusted SRAM. */
	nsram_start_index = FOUR_KB_INDEX(NSRAM_BASE);
	nsram_end_index = FOUR_KB_INDEX(NSRAM_BASE + NSRAM_SIZE);
	if (nsram_end_index == nsram_start_index)
		nsram_end_index++;

	/* Each index needs to be offset by 'off' to obtain absolute values */
	off = FOUR_KB_INDEX(NSRAM_BASE);
	for (idx = off; idx < (NUM_4K_IN_2MB + off); idx++) {

		l3_desc = INVALID_DESC;
		xt_addr = &l3_xlation_table[NSRAM_PAGETABLE][idx - off];

		if (idx >= nsram_start_index && idx < nsram_end_index)
			l3_desc = create_rwmem_block(idx, LEVEL3, NS);

		*xt_addr = l3_desc;
	}

	return (unsigned long) l1_xlation_table;
}

/*******************************************************************************
 * Enable the MMU assuming that the pagetables have already been created
 ******************************************************************************/
void enable_mmu(void)
{
	unsigned long mair, tcr, ttbr, sctlr;
	unsigned long current_el = read_current_el();

	/* Set the attributes in the right indices of the MAIR */
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			      ATTR_IWBWA_OWBWA_NTR_INDEX);
	write_mair(mair);
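	/*
	 * For example, assuming the usual encodings (ATTR_DEVICE as
	 * Device-nGnRE, 0x4, at index 0 and ATTR_IWBWA_OWBWA_NTR as Normal
	 * WBWA, 0xff, at index 1), the value written above is 0xff04.
	 */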

	/*
	 * Set TCR bits as well. Inner & outer WBWA & shareable + T0SZ = 32
	 */
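	/*
	 * (A T0SZ of 32 restricts the input address range to 2^(64 - 32)
	 * bytes, i.e. the 4GB covered by the single level 1 table above.)
	 */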
	tcr = TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA |
	      TCR_RGN_INNER_WBA | TCR_T0SZ_4GB;
	if (GET_EL(current_el) == MODE_EL3) {
		tcr |= TCR_EL3_RES1;
		/* Invalidate all TLBs */
		tlbialle3();
	} else {
		/* Invalidate EL1 TLBs */
		tlbivmalle1();
	}

	write_tcr(tcr);

	/* Set TTBR bits as well */
	assert(((unsigned long)l1_xlation_table &
		(sizeof(l1_xlation_table) - 1)) == 0);
	ttbr = (unsigned long) l1_xlation_table;
	write_ttbr0(ttbr);

	sctlr = read_sctlr();
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT | SCTLR_I_BIT;
	sctlr |= SCTLR_A_BIT | SCTLR_C_BIT;
	write_sctlr(sctlr);

	return;
}

void disable_mmu(void)
{
	/* Zero out the MMU related registers */
	write_mair(0);
	write_tcr(0);
	write_ttbr0(0);
	write_sctlr(0);

	/* Invalidate TLBs of the CurrentEL */
	tlbiall();

	/* Flush the caches */
	dcsw_op_all(DCCISW);

	return;
}

/*******************************************************************************
 * Setup the pagetables as per the platform memory map & initialize the MMU
 ******************************************************************************/
void configure_mmu(meminfo *mem_layout,
		   unsigned long ro_start,
		   unsigned long ro_limit,
		   unsigned long coh_start,
		   unsigned long coh_limit)
{
	fill_xlation_tables(mem_layout,
			    ro_start,
			    ro_limit,
			    coh_start,
			    coh_limit);
	enable_mmu();
	return;
}

/* Simple routine which returns a configuration variable value */
unsigned long platform_get_cfgvar(unsigned int var_id)
{
	assert(var_id < CONFIG_LIMIT);
	return platform_config[var_id];
}

/*******************************************************************************
 * A single boot loader stack is expected to work on both the Foundation FVP
 * models and the two flavours of the Base FVP models (AEMv8 & Cortex). The
 * SYS_ID register provides a mechanism for detecting the differences between
 * these platforms. This information is stored in a per-BL array to allow the
 * code to take the correct path.
 ******************************************************************************/
int platform_config_setup(void)
{
	unsigned int rev, hbi, bld, arch, sys_id, midr_pn;

	sys_id = mmio_read_32(VE_SYSREGS_BASE + V2M_SYS_ID);
	rev = (sys_id >> SYS_ID_REV_SHIFT) & SYS_ID_REV_MASK;
	hbi = (sys_id >> SYS_ID_HBI_SHIFT) & SYS_ID_HBI_MASK;
	bld = (sys_id >> SYS_ID_BLD_SHIFT) & SYS_ID_BLD_MASK;
	arch = (sys_id >> SYS_ID_ARCH_SHIFT) & SYS_ID_ARCH_MASK;

	assert(rev == REV_FVP);
	assert(arch == ARCH_MODEL);

	/*
	 * The build field in the SYS_ID tells which variant of the GIC
	 * memory map is implemented by the model.
	 */
	switch (bld) {
	case BLD_GIC_VE_MMAP:
		platform_config[CONFIG_GICD_ADDR] = VE_GICD_BASE;
		platform_config[CONFIG_GICC_ADDR] = VE_GICC_BASE;
		platform_config[CONFIG_GICH_ADDR] = VE_GICH_BASE;
		platform_config[CONFIG_GICV_ADDR] = VE_GICV_BASE;
		break;
	case BLD_GIC_A53A57_MMAP:
		platform_config[CONFIG_GICD_ADDR] = BASE_GICD_BASE;
		platform_config[CONFIG_GICC_ADDR] = BASE_GICC_BASE;
		platform_config[CONFIG_GICH_ADDR] = BASE_GICH_BASE;
		platform_config[CONFIG_GICV_ADDR] = BASE_GICV_BASE;
		break;
	default:
		assert(0);
	}

	/*
	 * The hbi field in the SYS_ID is 0x020 for the Base FVP & 0x010
	 * for the Foundation FVP.
	 */
	switch (hbi) {
	case HBI_FOUNDATION:
		platform_config[CONFIG_MAX_AFF0] = 4;
		platform_config[CONFIG_MAX_AFF1] = 1;
		platform_config[CONFIG_CPU_SETUP] = 0;
		platform_config[CONFIG_BASE_MMAP] = 0;
		platform_config[CONFIG_HAS_CCI] = 0;
		break;
	case HBI_FVP_BASE:
		midr_pn = (read_midr() >> MIDR_PN_SHIFT) & MIDR_PN_MASK;
		if ((midr_pn == MIDR_PN_A57) || (midr_pn == MIDR_PN_A53))
			platform_config[CONFIG_CPU_SETUP] = 1;
		else
			platform_config[CONFIG_CPU_SETUP] = 0;

		platform_config[CONFIG_MAX_AFF0] = 4;
		platform_config[CONFIG_MAX_AFF1] = 2;
		platform_config[CONFIG_BASE_MMAP] = 1;
		platform_config[CONFIG_HAS_CCI] = 1;
		break;
	default:
		assert(0);
	}

	return 0;
}

unsigned long plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}