/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <auth_mod.h>
#include <bl_common.h>
#include <debug.h>
#include <errno.h>
#include <io_storage.h>
#include <platform.h>
#include <string.h>
#include <utils.h>
#include <xlat_tables.h>

uintptr_t page_align(uintptr_t value, unsigned dir)
{
	/* Round up or down to the next page boundary, depending on 'dir' */
	if (value & (PAGE_SIZE - 1)) {
		value &= ~(PAGE_SIZE - 1);
		if (dir == UP)
			value += PAGE_SIZE;
	}

	return value;
}

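/*
 * Worked example (illustrative, not part of the original source): with the
 * usual 4KB page size (PAGE_SIZE == 0x1000), the rounding behaves as follows:
 *
 *	page_align(0x80001234, UP);   returns 0x80002000
 *	page_align(0x80001234, DOWN); returns 0x80001000
 *	page_align(0x80002000, UP);   returns 0x80002000 (already aligned)
 */
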
static inline unsigned int is_page_aligned(uintptr_t addr)
{
	return (addr & (PAGE_SIZE - 1)) == 0;
}

/******************************************************************************
 * Determine whether the memory region delimited by 'addr' and 'size' is free,
 * given the extents of free memory.
 * Return 1 if it is free, 0 if it is not free or if the input values are
 * invalid.
 *****************************************************************************/
static int is_mem_free(uintptr_t free_base, size_t free_size,
		       uintptr_t addr, size_t size)
{
	uintptr_t free_end, requested_end;

	/*
	 * Handle corner cases first.
	 *
	 * The order of the 2 tests is important, because if there's no space
	 * left (i.e. free_size == 0) but we don't ask for any memory
	 * (i.e. size == 0) then we should report that the memory is free.
	 */
	if (size == 0)
		return 1; /* A zero-byte region is always free */
	if (free_size == 0)
		return 0;

	/*
	 * Check that the end addresses don't overflow.
	 * If they do, consider that this memory region is not free, as this
	 * is an invalid scenario.
	 */
	if (check_uptr_overflow(free_base, free_size - 1))
		return 0;
	free_end = free_base + (free_size - 1);

	if (check_uptr_overflow(addr, size - 1))
		return 0;
	requested_end = addr + (size - 1);

	/*
	 * Finally, check that the requested memory region lies within the free
	 * region.
	 */
	return (addr >= free_base) && (requested_end <= free_end);
}

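/*
 * Worked example (illustrative, not part of the original source): with a free
 * region starting at free_base = 0x80000000 of free_size = 0x10000 bytes
 * (i.e. last free byte at 0x8000FFFF):
 *
 *	is_mem_free(0x80000000, 0x10000, 0x80004000, 0x1000); -> 1 (inside)
 *	is_mem_free(0x80000000, 0x10000, 0x8000F000, 0x2000); -> 0 (overruns)
 *	is_mem_free(0x80000000, 0x10000, 0x80004000, 0);      -> 1 (zero bytes)
 */
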
/******************************************************************************
 * Inside a given memory region, determine whether a sub-region of memory is
 * closer to the top or the bottom of the encompassing region. Return the
 * size of the smallest chunk of free memory surrounding the sub-region in
 * 'small_chunk_size'.
 *****************************************************************************/
static unsigned int choose_mem_pos(uintptr_t mem_start, uintptr_t mem_end,
				   uintptr_t submem_start, uintptr_t submem_end,
				   size_t *small_chunk_size)
{
	size_t top_chunk_size, bottom_chunk_size;

	assert(mem_start <= submem_start);
	assert(submem_start <= submem_end);
	assert(submem_end <= mem_end);
	assert(small_chunk_size != NULL);

	top_chunk_size = mem_end - submem_end;
	bottom_chunk_size = submem_start - mem_start;

	if (top_chunk_size < bottom_chunk_size) {
		*small_chunk_size = top_chunk_size;
		return TOP;
	} else {
		*small_chunk_size = bottom_chunk_size;
		return BOTTOM;
	}
}

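/*
 * Worked example (illustrative, not part of the original source): for an
 * encompassing region [0x0, 0xFFF] and a sub-region [0xE00, 0xEFF], the top
 * chunk is 0x100 bytes and the bottom chunk is 0xE00 bytes. The function
 * therefore returns TOP and writes 0x100 to *small_chunk_size, i.e. the
 * sub-region is treated as sitting at the top of the region and reserve_mem()
 * below discards the small top chunk along with it.
 */
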
/******************************************************************************
 * Reserve the memory region delimited by 'addr' and 'size'. The extents of free
 * memory are passed in 'free_base' and 'free_size' and they will be updated to
 * reflect the memory usage.
 * The caller must ensure the memory to reserve is free and that the addresses
 * and sizes passed in arguments are sane.
 *****************************************************************************/
void reserve_mem(uintptr_t *free_base, size_t *free_size,
		 uintptr_t addr, size_t size)
{
	size_t discard_size;
	size_t reserved_size;
	unsigned int pos;

	assert(free_base != NULL);
	assert(free_size != NULL);
	assert(is_mem_free(*free_base, *free_size, addr, size));

	if (size == 0) {
		WARN("Nothing to allocate, requested size is zero\n");
		return;
	}

	pos = choose_mem_pos(*free_base, *free_base + (*free_size - 1),
			     addr, addr + (size - 1),
			     &discard_size);

	reserved_size = size + discard_size;
	*free_size -= reserved_size;

	if (pos == BOTTOM)
		*free_base = addr + size;

	VERBOSE("Reserved 0x%zx bytes (discarded 0x%zx bytes %s)\n",
		reserved_size, discard_size,
		pos == TOP ? "above" : "below");
}

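/*
 * Usage sketch (illustrative, not part of the original source): carving a
 * page-sized buffer out of the top of the free area tracked in a meminfo_t,
 * assuming the free area is page-aligned and at least one page large.
 *
 *	meminfo_t *mem = ...;	/@ e.g. this BL stage's memory layout @/
 *	uintptr_t buf = (mem->free_base + mem->free_size) - PAGE_SIZE;
 *	reserve_mem(&mem->free_base, &mem->free_size, buf, PAGE_SIZE);
 *
 * (The '@' characters stand in for '*' so the nested comment stays legal.)
 * 'free_size' shrinks by the reserved plus discarded bytes; 'free_base' only
 * moves up when the reserved region sits at the bottom of the free area,
 * which is not the case here.
 */
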
static void dump_load_info(uintptr_t image_load_addr,
			   size_t image_size,
			   const meminfo_t *mem_layout)
{
	INFO("Trying to load image at address %p, size = 0x%zx\n",
		(void *)image_load_addr, image_size);
	INFO("Current memory layout:\n");
	INFO("  total region = [base = %p, size = 0x%zx]\n",
		(void *) mem_layout->total_base, mem_layout->total_size);
	INFO("  free region = [base = %p, size = 0x%zx]\n",
		(void *) mem_layout->free_base, mem_layout->free_size);
}

/* Generic function to return the size of an image */
size_t image_size(unsigned int image_id)
{
	uintptr_t dev_handle;
	uintptr_t image_handle;
	uintptr_t image_spec;
	size_t image_size = 0;
	int io_result;

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
	if (io_result != 0) {
		WARN("Failed to obtain reference to image id=%u (%i)\n",
			image_id, io_result);
		return 0;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != 0) {
		WARN("Failed to access image id=%u (%i)\n",
			image_id, io_result);
		return 0;
	}

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != 0) || (image_size == 0)) {
		WARN("Failed to determine the size of the image id=%u (%i)\n",
			image_id, io_result);
	}
	io_result = io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this
	 * bootloader stage
	 */
	io_result = io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return image_size;
}

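/*
 * Usage sketch (illustrative, not part of the original source): querying the
 * size of an image before deciding where to place it. BL32_IMAGE_ID is
 * assumed to be provided by the platform port.
 *
 *	size_t sz = image_size(BL32_IMAGE_ID);
 *	if (sz == 0)
 *		WARN("BL32 image is missing or empty\n");
 *
 * Note that a return value of 0 covers both "image not found" and
 * "zero-sized image", so callers cannot distinguish the two cases.
 */
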
/*******************************************************************************
 * Generic function to load an image at a specific address given an image ID and
 * extents of free memory.
 *
 * If the load is successful then the image information is updated.
 *
 * If the entry_point_info argument is not NULL then this function also updates:
 * - the memory layout to mark the memory as reserved;
 * - the entry point information.
 *
 * The caller might pass a NULL pointer for the entry point if they are not
 * interested in this information. This is typically the case for non-executable
 * images (e.g. certificates) and executable images that won't ever be executed
 * on the application processor (e.g. additional microcontroller firmware).
 *
 * Returns 0 on success, a negative error code otherwise.
 ******************************************************************************/
int load_image(meminfo_t *mem_layout,
	       unsigned int image_id,
	       uintptr_t image_base,
	       image_info_t *image_data,
	       entry_point_info_t *entry_point_info)
{
	uintptr_t dev_handle;
	uintptr_t image_handle;
	uintptr_t image_spec;
	size_t image_size;
	size_t bytes_read;
	int io_result;

	assert(mem_layout != NULL);
	assert(image_data != NULL);
	assert(image_data->h.version >= VERSION_1);

	/* Obtain a reference to the image by querying the platform layer */
	io_result = plat_get_image_source(image_id, &dev_handle, &image_spec);
	if (io_result != 0) {
		WARN("Failed to obtain reference to image id=%u (%i)\n",
			image_id, io_result);
		return io_result;
	}

	/* Attempt to access the image */
	io_result = io_open(dev_handle, image_spec, &image_handle);
	if (io_result != 0) {
		WARN("Failed to access image id=%u (%i)\n",
			image_id, io_result);
		return io_result;
	}

	INFO("Loading image id=%u at address %p\n", image_id,
		(void *) image_base);

	/* Find the size of the image */
	io_result = io_size(image_handle, &image_size);
	if ((io_result != 0) || (image_size == 0)) {
		WARN("Failed to determine the size of the image id=%u (%i)\n",
			image_id, io_result);
		/* Don't report success if io_size() returned a zero size */
		if (io_result == 0)
			io_result = -EIO;
		goto exit;
	}

	/* Check that the memory where the image will be loaded is free */
	if (!is_mem_free(mem_layout->free_base, mem_layout->free_size,
			 image_base, image_size)) {
		WARN("Failed to reserve region [base = %p, size = 0x%zx]\n",
			(void *) image_base, image_size);
		dump_load_info(image_base, image_size, mem_layout);
		io_result = -ENOMEM;
		goto exit;
	}

	/* We have enough space so load the image now */
	/* TODO: Consider whether to try to recover/retry a partially successful read */
	io_result = io_read(image_handle, image_base, image_size, &bytes_read);
	if ((io_result != 0) || (bytes_read < image_size)) {
		WARN("Failed to load image id=%u (%i)\n", image_id, io_result);
		/* Treat a short read as an I/O error */
		if (io_result == 0)
			io_result = -EIO;
		goto exit;
	}

	image_data->image_base = image_base;
	image_data->image_size = image_size;

	/*
	 * Update the memory usage info.
	 * This is done after the actual loading so that it is not updated when
	 * the load is unsuccessful.
	 * If the caller does not provide an entry point, bypass the memory
	 * reservation.
	 */
	if (entry_point_info != NULL) {
		reserve_mem(&mem_layout->free_base, &mem_layout->free_size,
			    image_base, image_size);
		entry_point_info->pc = image_base;
	} else {
		INFO("Skip reserving region [base = %p, size = 0x%zx]\n",
			(void *) image_base, image_size);
	}

	/*
	 * File has been successfully loaded.
	 * Flush the image in Trusted SRAM so that the next exception level can
	 * see it.
	 */
	flush_dcache_range(image_base, image_size);

	INFO("Image id=%u loaded at address %p, size = 0x%zx\n", image_id,
		(void *) image_base, image_size);

exit:
	io_close(image_handle);
	/* Ignore improbable/unrecoverable error in 'close' */

	/* TODO: Consider maintaining open device connection from this bootloader stage */
	io_dev_close(dev_handle);
	/* Ignore improbable/unrecoverable error in 'dev_close' */

	return io_result;
}

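/*
 * Usage sketch (illustrative, not part of the original source): loading a
 * non-executable image, e.g. firmware for a separate microcontroller, for
 * which no entry point information is needed. The image ID, load address and
 * 'bl2_mem_layout' variable are hypothetical.
 *
 *	image_info_t scp_image_info;
 *
 *	SET_PARAM_HEAD(&scp_image_info, PARAM_IMAGE_BINARY, VERSION_1, 0);
 *	if (load_image(bl2_mem_layout, SCP_BL2_IMAGE_ID, SCP_BL2_BASE,
 *		       &scp_image_info, NULL) != 0)
 *		ERROR("Failed to load SCP_BL2\n");
 *
 * Because the last argument is NULL, the free region in 'bl2_mem_layout' is
 * not shrunk and no entry point is recorded; only 'scp_image_info' is filled
 * in with the load address and size.
 */
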
/*******************************************************************************
 * Generic function to load and authenticate an image. The image is actually
 * loaded by calling the 'load_image()' function. In addition, this function
 * uses recursion to authenticate the parent images up to the root of trust.
 ******************************************************************************/
int load_auth_image(meminfo_t *mem_layout,
		    unsigned int image_id,
		    uintptr_t image_base,
		    image_info_t *image_data,
		    entry_point_info_t *entry_point_info)
{
	int rc;

#if TRUSTED_BOARD_BOOT
	unsigned int parent_id;

	/* Use recursion to authenticate parent images */
	rc = auth_mod_get_parent_id(image_id, &parent_id);
	if (rc == 0) {
		rc = load_auth_image(mem_layout, parent_id, image_base,
				     image_data, NULL);
		if (rc != 0) {
			return rc;
		}
	}
#endif /* TRUSTED_BOARD_BOOT */

	/* Load the image */
	rc = load_image(mem_layout, image_id, image_base, image_data,
			entry_point_info);
	if (rc != 0) {
		return rc;
	}

#if TRUSTED_BOARD_BOOT
	/* Authenticate it */
	rc = auth_mod_verify_img(image_id,
				 (void *)image_data->image_base,
				 image_data->image_size);
	if (rc != 0) {
		memset((void *)image_data->image_base, 0x00,
			image_data->image_size);
		flush_dcache_range(image_data->image_base,
				   image_data->image_size);
		return -EAUTH;
	}

	/* After working with data, invalidate the data cache */
	inv_dcache_range(image_data->image_base,
			 (size_t)image_data->image_size);
#endif /* TRUSTED_BOARD_BOOT */

	return 0;
}

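/*
 * Usage sketch (illustrative, not part of the original source): how a
 * BL2-like caller might load and authenticate BL31 and then print its entry
 * point. The image ID, load address and surrounding platform code are
 * assumptions, not part of this file.
 *
 *	image_info_t image_info;
 *	entry_point_info_t ep_info;
 *
 *	SET_PARAM_HEAD(&image_info, PARAM_IMAGE_BINARY, VERSION_1, 0);
 *	SET_PARAM_HEAD(&ep_info, PARAM_EP, VERSION_1, EP_SECURE);
 *
 *	if (load_auth_image(bl2_mem_layout, BL31_IMAGE_ID, BL31_BASE,
 *			    &image_info, &ep_info) != 0)
 *		panic();
 *	print_entry_point_info(&ep_info);
 *
 * With TRUSTED_BOARD_BOOT enabled, the certificate chain for the image is
 * loaded and verified first; on any authentication failure the image
 * contents are wiped and -EAUTH is returned.
 */
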
/*******************************************************************************
 * Print the content of an entry_point_info_t structure.
 ******************************************************************************/
void print_entry_point_info(const entry_point_info_t *ep_info)
{
	INFO("Entry point address = %p\n", (void *)ep_info->pc);
	INFO("SPSR = 0x%x\n", ep_info->spsr);

#define PRINT_IMAGE_ARG(n)					\
	VERBOSE("Argument #" #n " = 0x%llx\n",			\
		(unsigned long long) ep_info->args.arg##n)

	PRINT_IMAGE_ARG(0);
	PRINT_IMAGE_ARG(1);
	PRINT_IMAGE_ARG(2);
	PRINT_IMAGE_ARG(3);
	PRINT_IMAGE_ARG(4);
	PRINT_IMAGE_ARG(5);
	PRINT_IMAGE_ARG(6);
	PRINT_IMAGE_ARG(7);
#undef PRINT_IMAGE_ARG
}