// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Linaro Limited
 */

#include <dm.h>
#include <dm/of_access.h>
#include <tpm_api.h>
#include <tpm-common.h>
#include <tpm-v2.h>
#include <tpm_tcg2.h>
#include <u-boot/sha1.h>
#include <u-boot/sha256.h>
#include <u-boot/sha512.h>
#include <version_string.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>
#include <linux/unaligned/le_byteshift.h>
#include "tpm-utils.h"

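/**
 * tcg2_get_pcr_info() - get the supported, active PCR banks and their number
 *
 * @dev:		TPM device
 * @supported_bank:	bitmask of hash algorithms supported by the TPM
 * @active_bank:	bitmask of hash algorithms currently active
 * @bank_num:		number of PCR banks reported by the TPM
 *
 * Return: 0 on success, non-zero TPM return code otherwise
 */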
int tcg2_get_pcr_info(struct udevice *dev, u32 *supported_bank, u32 *active_bank,
		      u32 *bank_num)
{
	struct tpml_pcr_selection pcrs;
	size_t i;
	u32 ret;

	*supported_bank = 0;
	*active_bank = 0;
	*bank_num = 0;

	ret = tpm2_get_pcr_info(dev, &pcrs);
	if (ret)
		return ret;

	for (i = 0; i < pcrs.count; i++) {
		struct tpms_pcr_selection *sel = &pcrs.selection[i];
		u32 hash_mask = tcg2_algorithm_to_mask(sel->hash);

		if (tpm2_algorithm_supported(sel->hash))
			*supported_bank |= hash_mask;
		else
			log_warning("%s: unknown algorithm %x\n", __func__,
				    sel->hash);

		if (tpm2_is_active_bank(sel))
			*active_bank |= hash_mask;
	}

	*bank_num = pcrs.count;

	return 0;
}

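/**
 * tcg2_get_active_pcr_banks() - get the currently active PCR banks
 *
 * @dev:		TPM device
 * @active_pcr_banks:	bitmask of the active hash algorithms
 *
 * Return: 0 on success, non-zero TPM return code otherwise
 */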
int tcg2_get_active_pcr_banks(struct udevice *dev, u32 *active_pcr_banks)
{
	u32 supported = 0;
	u32 pcr_banks = 0;
	u32 active = 0;
	int rc;

	rc = tcg2_get_pcr_info(dev, &supported, &active, &pcr_banks);
	if (rc)
		return rc;

	*active_pcr_banks = active;

	return 0;
}

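/**
 * tcg2_event_get_size() - calculate the size of an event log entry
 *
 * @digest_list:	list of digests that the entry will hold
 *
 * Return: size in bytes of a TCG2 event holding the given digests
 */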
u32 tcg2_event_get_size(struct tpml_digest_values *digest_list)
{
	u32 len;
	size_t i;

	len = offsetof(struct tcg_pcr_event2, digests);
	len += offsetof(struct tpml_digest_values, digests);
	for (i = 0; i < digest_list->count; ++i) {
		u16 l = tpm2_algorithm_to_len(digest_list->digests[i].hash_alg);

		if (!l)
			continue;

		len += l + offsetof(struct tpmt_ha, digest);
	}
	len += sizeof(u32);

	return len;
}

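/**
 * tcg2_create_digest() - hash a memory region with every active PCR bank
 *
 * @dev:		TPM device
 * @input:		data to be hashed
 * @length:		length of the data
 * @digest_list:	list of digests to fill in
 *
 * Return: 0 on success, non-zero on failure
 */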
int tcg2_create_digest(struct udevice *dev, const u8 *input, u32 length,
		       struct tpml_digest_values *digest_list)
{
	u8 final[sizeof(union tpmu_ha)];
	sha256_context ctx_256;
	sha512_context ctx_512;
	sha1_context ctx;
	u32 active;
	size_t i;
	u32 len;
	int rc;

	rc = tcg2_get_active_pcr_banks(dev, &active);
	if (rc)
		return rc;

	digest_list->count = 0;
	for (i = 0; i < ARRAY_SIZE(hash_algo_list); ++i) {
		if (!(active & hash_algo_list[i].hash_mask))
			continue;

		switch (hash_algo_list[i].hash_alg) {
		case TPM2_ALG_SHA1:
			sha1_starts(&ctx);
			sha1_update(&ctx, input, length);
			sha1_finish(&ctx, final);
			len = TPM2_SHA1_DIGEST_SIZE;
			break;
		case TPM2_ALG_SHA256:
			sha256_starts(&ctx_256);
			sha256_update(&ctx_256, input, length);
			sha256_finish(&ctx_256, final);
			len = TPM2_SHA256_DIGEST_SIZE;
			break;
		case TPM2_ALG_SHA384:
			sha384_starts(&ctx_512);
			sha384_update(&ctx_512, input, length);
			sha384_finish(&ctx_512, final);
			len = TPM2_SHA384_DIGEST_SIZE;
			break;
		case TPM2_ALG_SHA512:
			sha512_starts(&ctx_512);
			sha512_update(&ctx_512, input, length);
			sha512_finish(&ctx_512, final);
			len = TPM2_SHA512_DIGEST_SIZE;
			break;
		default:
			printf("%s: unsupported algorithm %x\n", __func__,
			       hash_algo_list[i].hash_alg);
			continue;
		}

		digest_list->digests[digest_list->count].hash_alg =
			hash_algo_list[i].hash_alg;
		memcpy(&digest_list->digests[digest_list->count].digest, final,
		       len);
		digest_list->count++;
	}

	return 0;
}

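/**
 * tcg2_log_append() - append an event to an event log buffer
 *
 * @pcr_index:		PCR index the event was measured into
 * @event_type:		type of the event
 * @digest_list:	list of digests to add
 * @size:		size of the event data
 * @event:		event data to append
 * @log:		log buffer to write the event to
 */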
void tcg2_log_append(u32 pcr_index, u32 event_type,
		     struct tpml_digest_values *digest_list, u32 size,
		     const u8 *event, u8 *log)
{
	size_t len;
	size_t pos;
	u32 i;

	pos = offsetof(struct tcg_pcr_event2, pcr_index);
	put_unaligned_le32(pcr_index, log);
	pos = offsetof(struct tcg_pcr_event2, event_type);
	put_unaligned_le32(event_type, log + pos);
	pos = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, count);
	put_unaligned_le32(digest_list->count, log + pos);

	pos = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, digests);
	for (i = 0; i < digest_list->count; ++i) {
		u16 hash_alg = digest_list->digests[i].hash_alg;

		len = tpm2_algorithm_to_len(hash_alg);
		if (!len)
			continue;

		pos += offsetof(struct tpmt_ha, hash_alg);
		put_unaligned_le16(hash_alg, log + pos);
		pos += offsetof(struct tpmt_ha, digest);
		memcpy(log + pos, (u8 *)&digest_list->digests[i].digest, len);
		pos += len;
	}

	put_unaligned_le32(size, log + pos);
	pos += sizeof(u32);
	memcpy(log + pos, event, size);
}

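/*
 * tcg2_log_append_check() - check the buffer has room, then append an event
 *
 * Appends the event to the log tracked by @elog and advances the current
 * log position, failing with -ENOBUFS if the event does not fit.
 */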
static int tcg2_log_append_check(struct tcg2_event_log *elog, u32 pcr_index,
				 u32 event_type,
				 struct tpml_digest_values *digest_list,
				 u32 size, const u8 *event)
{
	u32 event_size;
	u8 *log;

	event_size = size + tcg2_event_get_size(digest_list);
	if (elog->log_position + event_size > elog->log_size) {
		printf("%s: log too large: %u + %u > %u\n", __func__,
		       elog->log_position, event_size, elog->log_size);
		return -ENOBUFS;
	}

	log = elog->log + elog->log_position;
	elog->log_position += event_size;

	tcg2_log_append(pcr_index, event_type, digest_list, size, event, log);

	return 0;
}

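/*
 * tcg2_log_init() - write the initial Spec ID event into an empty log buffer
 *
 * Creates the EV_NO_ACTION TCG_EfiSpecIDEvent header that advertises the
 * active digest algorithms used by all the events that follow.
 */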
static int tcg2_log_init(struct udevice *dev, struct tcg2_event_log *elog)
{
	struct tcg_efi_spec_id_event *ev;
	struct tcg_pcr_event *log;
	u32 event_size;
	u32 count = 0;
	u32 log_size;
	u32 active;
	size_t i;
	u16 len;
	int rc;

	rc = tcg2_get_active_pcr_banks(dev, &active);
	if (rc)
		return rc;

	event_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes);
	for (i = 0; i < ARRAY_SIZE(hash_algo_list); ++i) {
		if (!(active & hash_algo_list[i].hash_mask))
			continue;

		switch (hash_algo_list[i].hash_alg) {
		case TPM2_ALG_SHA1:
		case TPM2_ALG_SHA256:
		case TPM2_ALG_SHA384:
		case TPM2_ALG_SHA512:
			count++;
			break;
		default:
			continue;
		}
	}

	event_size += 1 +
		(sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count);
	log_size = offsetof(struct tcg_pcr_event, event) + event_size;

	if (log_size > elog->log_size) {
		printf("%s: log too large: %u > %u\n", __func__, log_size,
		       elog->log_size);
		return -ENOBUFS;
	}

	log = (struct tcg_pcr_event *)elog->log;
	put_unaligned_le32(0, &log->pcr_index);
	put_unaligned_le32(EV_NO_ACTION, &log->event_type);
	memset(&log->digest, 0, sizeof(log->digest));
	put_unaligned_le32(event_size, &log->event_size);

	ev = (struct tcg_efi_spec_id_event *)log->event;
	strlcpy((char *)ev->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
		sizeof(ev->signature));
	put_unaligned_le32(0, &ev->platform_class);
	ev->spec_version_minor = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2;
	ev->spec_version_major = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2;
	ev->spec_errata = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_ERRATA_TPM2;
	ev->uintn_size = sizeof(size_t) / sizeof(u32);
	put_unaligned_le32(count, &ev->number_of_algorithms);

	count = 0;
	for (i = 0; i < ARRAY_SIZE(hash_algo_list); ++i) {
		if (!(active & hash_algo_list[i].hash_mask))
			continue;

		len = hash_algo_list[i].hash_len;
		if (!len)
			continue;

		put_unaligned_le16(hash_algo_list[i].hash_alg,
				   &ev->digest_sizes[count].algorithm_id);
		put_unaligned_le16(len, &ev->digest_sizes[count].digest_size);
		count++;
	}

	*((u8 *)ev + (event_size - 1)) = 0;
	elog->log_position = log_size;

	return 0;
}

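/*
 * tcg2_replay_eventlog() - replay a discovered event log into the TPM
 *
 * Walks the events in the log starting at @log_position and, unless
 * @digest_list is empty (the PCRs were already extended), extends each
 * event into its PCR.  Parsing stops at the first entry that does not
 * look like a valid event.
 */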
static int tcg2_replay_eventlog(struct tcg2_event_log *elog,
				struct udevice *dev,
				struct tpml_digest_values *digest_list,
				u32 log_position)
{
	const u32 offset = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, digests);
	u32 event_size;
	u32 count;
	u16 algo;
	u32 pcr;
	u32 pos;
	u16 len;
	u8 *log;
	int rc;
	u32 i;

	while (log_position + offset < elog->log_size) {
		log = elog->log + log_position;

		pos = offsetof(struct tcg_pcr_event2, pcr_index);
		pcr = get_unaligned_le32(log + pos);
		pos = offsetof(struct tcg_pcr_event2, event_type);
		if (!get_unaligned_le32(log + pos))
			return 0;

		pos = offsetof(struct tcg_pcr_event2, digests) +
			offsetof(struct tpml_digest_values, count);
		count = get_unaligned_le32(log + pos);
		if (count > ARRAY_SIZE(hash_algo_list) ||
		    (digest_list->count && digest_list->count != count))
			return 0;

		pos = offsetof(struct tcg_pcr_event2, digests) +
			offsetof(struct tpml_digest_values, digests);
		for (i = 0; i < count; ++i) {
			pos += offsetof(struct tpmt_ha, hash_alg);
			if (log_position + pos + sizeof(u16) >= elog->log_size)
				return 0;

			algo = get_unaligned_le16(log + pos);
			pos += offsetof(struct tpmt_ha, digest);
			switch (algo) {
			case TPM2_ALG_SHA1:
			case TPM2_ALG_SHA256:
			case TPM2_ALG_SHA384:
			case TPM2_ALG_SHA512:
				len = tpm2_algorithm_to_len(algo);
				break;
			default:
				return 0;
			}

			if (digest_list->count) {
				if (algo != digest_list->digests[i].hash_alg ||
				    log_position + pos + len >= elog->log_size)
					return 0;

				memcpy(digest_list->digests[i].digest.sha512,
				       log + pos, len);
			}

			pos += len;
		}

		if (log_position + pos + sizeof(u32) >= elog->log_size)
			return 0;

		event_size = get_unaligned_le32(log + pos);
		pos += event_size + sizeof(u32);
		if (log_position + pos > elog->log_size)
			return 0;

		if (digest_list->count) {
			rc = tcg2_pcr_extend(dev, pcr, digest_list);
			if (rc)
				return rc;
		}

		log_position += pos;
	}

	elog->log_position = log_position;
	elog->found = true;
	return 0;
}

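/*
 * tcg2_log_parse() - check for and handle an existing event log
 *
 * Validates the TCG_EfiSpecIDEvent header of a log left by earlier
 * firmware.  If PCR0 is still all zeroes the log is replayed into the
 * TPM, otherwise the events are only walked to find the end of the log.
 */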
static int tcg2_log_parse(struct udevice *dev, struct tcg2_event_log *elog)
{
	struct tpml_digest_values digest_list;
	struct tcg_efi_spec_id_event *event;
	struct tcg_pcr_event *log;
	u32 log_active;
	u32 calc_size;
	u32 active;
	u32 count;
	u32 evsz;
	u32 mask;
	u16 algo;
	u16 len;
	int rc;
	u32 i;
	u16 j;

	if (elog->log_size <= offsetof(struct tcg_pcr_event, event))
		return 0;

	log = (struct tcg_pcr_event *)elog->log;
	if (get_unaligned_le32(&log->pcr_index) != 0 ||
	    get_unaligned_le32(&log->event_type) != EV_NO_ACTION)
		return 0;

	for (i = 0; i < sizeof(log->digest); i++) {
		if (log->digest[i])
			return 0;
	}

	evsz = get_unaligned_le32(&log->event_size);
	if (evsz < offsetof(struct tcg_efi_spec_id_event, digest_sizes) ||
	    evsz + offsetof(struct tcg_pcr_event, event) > elog->log_size)
		return 0;

	event = (struct tcg_efi_spec_id_event *)log->event;
	if (memcmp(event->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
		   sizeof(TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03)))
		return 0;

	if (event->spec_version_minor != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2 ||
	    event->spec_version_major != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2)
		return 0;

	count = get_unaligned_le32(&event->number_of_algorithms);
	if (count > ARRAY_SIZE(hash_algo_list))
		return 0;

	calc_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes) +
		(sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count) +
		1;
	if (evsz != calc_size)
		return 0;

	rc = tcg2_get_active_pcr_banks(dev, &active);
	if (rc)
		return rc;

	digest_list.count = 0;
	log_active = 0;

	for (i = 0; i < count; ++i) {
		algo = get_unaligned_le16(&event->digest_sizes[i].algorithm_id);
		mask = tcg2_algorithm_to_mask(algo);

		if (!(active & mask))
			return 0;

		switch (algo) {
		case TPM2_ALG_SHA1:
		case TPM2_ALG_SHA256:
		case TPM2_ALG_SHA384:
		case TPM2_ALG_SHA512:
			len = get_unaligned_le16(&event->digest_sizes[i].digest_size);
			if (tpm2_algorithm_to_len(algo) != len)
				return 0;
			digest_list.digests[digest_list.count++].hash_alg = algo;
			break;
		default:
			return 0;
		}

		log_active |= mask;
	}

	/* Ensure the previous firmware extended all the PCRs. */
	if (log_active != active)
		return 0;

	/* Read PCR0 to check if previous firmware extended the PCRs or not. */
	rc = tcg2_pcr_read(dev, 0, &digest_list);
	if (rc)
		return rc;

	for (i = 0; i < digest_list.count; ++i) {
		len = tpm2_algorithm_to_len(digest_list.digests[i].hash_alg);
		for (j = 0; j < len; ++j) {
			if (digest_list.digests[i].digest.sha512[j])
				break;
		}

		/* PCR is non-zero; it has been extended, so skip extending. */
		if (j != len) {
			digest_list.count = 0;
			break;
		}
	}

	return tcg2_replay_eventlog(elog, dev, &digest_list,
				    offsetof(struct tcg_pcr_event, event) +
				    evsz);
}

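/**
 * tcg2_pcr_extend() - extend a PCR with every digest in a digest list
 *
 * @dev:		TPM device
 * @pcr_index:		PCR index to extend
 * @digest_list:	list of digests to extend the PCR with
 *
 * Return: 0 on success, non-zero on failure
 */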
int tcg2_pcr_extend(struct udevice *dev, u32 pcr_index,
		    struct tpml_digest_values *digest_list)
{
	u32 rc;
	u32 i;

	for (i = 0; i < digest_list->count; i++) {
		u32 alg = digest_list->digests[i].hash_alg;

		rc = tpm2_pcr_extend(dev, pcr_index, alg,
				     (u8 *)&digest_list->digests[i].digest,
				     tpm2_algorithm_to_len(alg));
		if (rc) {
			printf("%s: error pcr:%u alg:%08x\n", __func__,
			       pcr_index, alg);
			return rc;
		}
	}

	return 0;
}

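/**
 * tcg2_pcr_read() - read a PCR value for every algorithm in a digest list
 *
 * @dev:		TPM device
 * @pcr_index:		PCR index to read
 * @digest_list:	list of digests; each entry's digest is filled in
 *
 * Return: 0 on success, non-zero on failure
 */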
int tcg2_pcr_read(struct udevice *dev, u32 pcr_index,
		  struct tpml_digest_values *digest_list)
{
	struct tpm_chip_priv *priv;
	u32 rc;
	u32 i;

	priv = dev_get_uclass_priv(dev);
	if (!priv)
		return -ENODEV;

	for (i = 0; i < digest_list->count; i++) {
		u32 alg = digest_list->digests[i].hash_alg;
		u8 *digest = (u8 *)&digest_list->digests[i].digest;

		rc = tpm2_pcr_read(dev, pcr_index, priv->pcr_select_min, alg,
				   digest, tpm2_algorithm_to_len(alg), NULL);
		if (rc) {
			printf("%s: error pcr:%u alg:%08x\n", __func__,
			       pcr_index, alg);
			return rc;
		}
	}

	return 0;
}

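/**
 * tcg2_measure_data() - measure a data region and log the event
 *
 * Hashes @data (or @event when @data is NULL) with every active PCR bank,
 * extends @pcr_index and appends the event to the log.
 *
 * Return: 0 on success, non-zero on failure
 */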
int tcg2_measure_data(struct udevice *dev, struct tcg2_event_log *elog,
		      u32 pcr_index, u32 size, const u8 *data, u32 event_type,
		      u32 event_size, const u8 *event)
{
	struct tpml_digest_values digest_list;
	int rc;

	if (data)
		rc = tcg2_create_digest(dev, data, size, &digest_list);
	else
		rc = tcg2_create_digest(dev, event, event_size, &digest_list);
	if (rc)
		return rc;

	rc = tcg2_pcr_extend(dev, pcr_index, &digest_list);
	if (rc)
		return rc;

	return tcg2_log_append_check(elog, pcr_index, event_type, &digest_list,
				     event_size, event);
}

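/**
 * tcg2_log_prepare_buffer() - set up the event log buffer
 *
 * Discovers any event log left by earlier boot stages and either reuses it
 * or copies it into the caller's buffer; otherwise a fresh log is
 * initialized in the buffer that was found or supplied.
 *
 * Return: 0 on success, negative error code otherwise
 */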
int tcg2_log_prepare_buffer(struct udevice *dev, struct tcg2_event_log *elog,
			    bool ignore_existing_log)
{
	struct tcg2_event_log log;
	int rc, i;

	elog->log_position = 0;
	elog->found = false;

	/*
	 * Make sure U-Boot is compiled with support for all the active PCR
	 * banks, since we are about to create an EventLog and we won't
	 * measure anything if the PCR banks don't match.
	 */
	if (!tpm2_check_active_banks(dev)) {
		log_err("Cannot create EventLog\n");
		log_err("Mismatch between U-Boot and TPM hash algos\n");
		log_info("TPM:\n");
		tpm2_print_active_banks(dev);
		log_info("U-Boot:\n");
		for (i = 0; i < ARRAY_SIZE(hash_algo_list); i++) {
			const struct digest_info *algo = &hash_algo_list[i];
			const char *str;

			if (!algo->supported)
				continue;

			str = tpm2_algorithm_name(algo->hash_alg);
			if (str)
				log_info("%s\n", str);
		}
		return -EINVAL;
	}

	rc = tcg2_platform_get_log(dev, (void **)&log.log, &log.log_size);
	if (!rc) {
		log.log_position = 0;
		log.found = false;

		if (!ignore_existing_log) {
			rc = tcg2_log_parse(dev, &log);
			if (rc)
				return rc;
		}

		if (elog->log_size) {
			if (log.found) {
				if (elog->log_size < log.log_position)
					return -ENOBUFS;

				/*
				 * Copy the discovered log into the user buffer
				 * if there's enough space.
				 */
				memcpy(elog->log, log.log, log.log_position);
			}

			unmap_physmem(log.log, MAP_NOCACHE);
		} else {
			elog->log = log.log;
			elog->log_size = log.log_size;
		}

		elog->log_position = log.log_position;
		elog->found = log.found;
	}

	/*
	 * Initialize the log buffer if no log was discovered and the buffer
	 * is valid. Users can pass in their own buffer as a fallback if no
	 * memory region is found.
	 */
	if (!elog->found && elog->log_size)
		rc = tcg2_log_init(dev, elog);

	return rc;
}

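/**
 * tcg2_measurement_init() - initialize measurement and the event log
 *
 * Finds a TPMv2 device, starts it, prepares the event log buffer and
 * measures the U-Boot version string as the EV_S_CRTM_VERSION event.
 *
 * Return: 0 on success, non-zero on failure
 */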
int tcg2_measurement_init(struct udevice **dev, struct tcg2_event_log *elog,
			  bool ignore_existing_log)
{
	int rc;

	rc = tcg2_platform_get_tpm2(dev);
	if (rc)
		return rc;

	rc = tpm_auto_start(*dev);
	if (rc)
		return rc;

	rc = tcg2_log_prepare_buffer(*dev, elog, ignore_existing_log);
	if (rc) {
		tcg2_measurement_term(*dev, elog, true);
		return rc;
	}

	rc = tcg2_measure_event(*dev, elog, 0, EV_S_CRTM_VERSION,
				strlen(version_string) + 1,
				(u8 *)version_string);
	if (rc) {
		tcg2_measurement_term(*dev, elog, true);
		return rc;
	}

	return 0;
}

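/**
 * tcg2_measurement_term() - finish measurement by measuring the separators
 *
 * Measures an EV_SEPARATOR event into PCRs 0-7 (0x1 on error, 0xffffffff
 * otherwise) and unmaps the event log buffer.
 */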
void tcg2_measurement_term(struct udevice *dev, struct tcg2_event_log *elog,
			   bool error)
{
	u32 event = error ? 0x1 : 0xffffffff;
	int i;

	for (i = 0; i < 8; ++i)
		tcg2_measure_event(dev, elog, i, EV_SEPARATOR, sizeof(event),
				   (const u8 *)&event);

	if (elog->log)
		unmap_physmem(elog->log, MAP_NOCACHE);
}

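/*
 * tcg2_platform_get_log() - find the event log address and size
 *
 * Looks for the "tpm_event_log_addr"/"tpm_event_log_size" or
 * "linux,sml-base"/"linux,sml-size" device tree properties, falling back
 * to a "memory-region" phandle, and maps the region for use as the log.
 */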
__weak int tcg2_platform_get_log(struct udevice *dev, void **addr, u32 *size)
{
	const __be32 *addr_prop;
	const __be32 *size_prop;
	int asize;
	int ssize;

	*addr = NULL;
	*size = 0;

	addr_prop = dev_read_prop(dev, "tpm_event_log_addr", &asize);
	if (!addr_prop)
		addr_prop = dev_read_prop(dev, "linux,sml-base", &asize);

	size_prop = dev_read_prop(dev, "tpm_event_log_size", &ssize);
	if (!size_prop)
		size_prop = dev_read_prop(dev, "linux,sml-size", &ssize);

	if (addr_prop && size_prop) {
		u64 a = of_read_number(addr_prop, asize / sizeof(__be32));
		u64 s = of_read_number(size_prop, ssize / sizeof(__be32));

		*addr = map_physmem(a, s, MAP_NOCACHE);
		*size = (u32)s;
	} else {
		struct ofnode_phandle_args args;
		phys_addr_t a;
		fdt_size_t s;

		if (dev_read_phandle_with_args(dev, "memory-region", NULL, 0,
					       0, &args))
			return -ENODEV;

		a = ofnode_get_addr_size(args.node, "reg", &s);
		if (a == FDT_ADDR_T_NONE)
			return -ENOMEM;

		*addr = map_physmem(a, s, MAP_NOCACHE);
		*size = (u32)s;
	}

	return 0;
}

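/*
 * tcg2_platform_get_tpm2() - find the first TPMv2 device on the system
 */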
__weak int tcg2_platform_get_tpm2(struct udevice **dev)
{
	for_each_tpm_device(*dev) {
		if (tpm_get_version(*dev) == TPM_V2)
			return 0;
	}

	return -ENODEV;
}

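/*
 * tcg2_algorithm_to_mask() - get the PCR bank bitmask for a hash algorithm,
 * or 0 when the algorithm is not in hash_algo_list.
 */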
u32 tcg2_algorithm_to_mask(enum tpm2_algorithms algo)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(hash_algo_list); i++) {
		if (hash_algo_list[i].hash_alg == algo)
			return hash_algo_list[i].hash_mask;
	}

	return 0;
}

__weak void tcg2_platform_startup_error(struct udevice *dev, int rc) {}