// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Linaro Limited
 */

#include <dm.h>
#include <dm/of_access.h>
#include <tpm_api.h>
#include <tpm-common.h>
#include <tpm-v2.h>
#include <tpm_tcg2.h>
#include <u-boot/sha1.h>
#include <u-boot/sha256.h>
#include <u-boot/sha512.h>
#include <version_string.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>
#include <linux/unaligned/le_byteshift.h>
#include "tpm-utils.h"

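/**
 * tcg2_get_pcr_info() - get the supported and active PCR banks of the TPM
 * @dev:		TPM device
 * @supported_bank:	bitmask of the hash algorithms supported by the TPM
 * @active_bank:	bitmask of the currently active PCR banks
 * @bank_num:		number of PCR banks reported by the TPM
 *
 * Return: 0 on success, a TPM return code otherwise
 */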
int tcg2_get_pcr_info(struct udevice *dev, u32 *supported_bank,
                      u32 *active_bank, u32 *bank_num)
{
        struct tpml_pcr_selection pcrs;
        size_t i;
        u32 ret;

        *supported_bank = 0;
        *active_bank = 0;
        *bank_num = 0;

        ret = tpm2_get_pcr_info(dev, &pcrs);
        if (ret)
                return ret;

        for (i = 0; i < pcrs.count; i++) {
                u32 hash_mask = tcg2_algorithm_to_mask(pcrs.selection[i].hash);

                if (hash_mask) {
                        *supported_bank |= hash_mask;
                        if (tpm2_is_active_bank(&pcrs.selection[i]))
                                *active_bank |= hash_mask;
                } else {
                        printf("%s: unknown algorithm %x\n", __func__,
                               pcrs.selection[i].hash);
                }
        }

        *bank_num = pcrs.count;

        return 0;
}

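/**
 * tcg2_get_active_pcr_banks() - get a bitmask of the active PCR banks
 * @dev:		TPM device
 * @active_pcr_banks:	bitmask of the currently active PCR banks
 *
 * Return: 0 on success, a TPM return code otherwise
 */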
int tcg2_get_active_pcr_banks(struct udevice *dev, u32 *active_pcr_banks)
{
        u32 supported = 0;
        u32 pcr_banks = 0;
        u32 active = 0;
        int rc;

        rc = tcg2_get_pcr_info(dev, &supported, &active, &pcr_banks);
        if (rc)
                return rc;

        *active_pcr_banks = active;

        return 0;
}

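/**
 * tcg2_event_get_size() - compute the size of a TCG_PCR_EVENT2 log entry
 * @digest_list:	digests the entry will carry
 *
 * Return: size in bytes of the serialized entry, i.e. the header, one
 * TPMT_HA per digest, and the trailing event_size field
 */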
u32 tcg2_event_get_size(struct tpml_digest_values *digest_list)
{
        u32 len;
        size_t i;

        len = offsetof(struct tcg_pcr_event2, digests);
        len += offsetof(struct tpml_digest_values, digests);
        for (i = 0; i < digest_list->count; ++i) {
                u16 l = tpm2_algorithm_to_len(digest_list->digests[i].hash_alg);

                if (!l)
                        continue;

                len += l + offsetof(struct tpmt_ha, digest);
        }
        len += sizeof(u32);

        return len;
}

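/**
 * tcg2_create_digest() - hash data with every active PCR bank algorithm
 * @dev:		TPM device
 * @input:		data to hash
 * @length:		length of the data
 * @digest_list:	filled with one digest per active bank
 *
 * Return: 0 on success, an error code otherwise
 */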
int tcg2_create_digest(struct udevice *dev, const u8 *input, u32 length,
                       struct tpml_digest_values *digest_list)
{
        u8 final[sizeof(union tpmu_ha)];
        sha256_context ctx_256;
        sha512_context ctx_512;
        sha1_context ctx;
        u32 active;
        size_t i;
        u32 len;
        int rc;

        rc = tcg2_get_active_pcr_banks(dev, &active);
        if (rc)
                return rc;

        digest_list->count = 0;
        for (i = 0; i < ARRAY_SIZE(hash_algo_list); ++i) {
                if (!(active & hash_algo_list[i].hash_mask))
                        continue;

                switch (hash_algo_list[i].hash_alg) {
                case TPM2_ALG_SHA1:
                        sha1_starts(&ctx);
                        sha1_update(&ctx, input, length);
                        sha1_finish(&ctx, final);
                        len = TPM2_SHA1_DIGEST_SIZE;
                        break;
                case TPM2_ALG_SHA256:
                        sha256_starts(&ctx_256);
                        sha256_update(&ctx_256, input, length);
                        sha256_finish(&ctx_256, final);
                        len = TPM2_SHA256_DIGEST_SIZE;
                        break;
                case TPM2_ALG_SHA384:
                        sha384_starts(&ctx_512);
                        sha384_update(&ctx_512, input, length);
                        sha384_finish(&ctx_512, final);
                        len = TPM2_SHA384_DIGEST_SIZE;
                        break;
                case TPM2_ALG_SHA512:
                        sha512_starts(&ctx_512);
                        sha512_update(&ctx_512, input, length);
                        sha512_finish(&ctx_512, final);
                        len = TPM2_SHA512_DIGEST_SIZE;
                        break;
                default:
                        printf("%s: unsupported algorithm %x\n", __func__,
                               hash_algo_list[i].hash_alg);
                        continue;
                }

                digest_list->digests[digest_list->count].hash_alg =
                        hash_algo_list[i].hash_alg;
                memcpy(&digest_list->digests[digest_list->count].digest, final,
                       len);
                digest_list->count++;
        }

        return 0;
}

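/**
 * tcg2_log_append() - serialize a TCG_PCR_EVENT2 entry into the event log
 * @pcr_index:		PCR the event was extended into
 * @event_type:		type of the event
 * @digest_list:	digests recorded for the event
 * @size:		size of the event data
 * @event:		event data
 * @log:		buffer to write the entry into
 *
 * All multi-byte fields are written little-endian, per the TCG
 * crypto-agile event log format.
 */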
void tcg2_log_append(u32 pcr_index, u32 event_type,
                     struct tpml_digest_values *digest_list, u32 size,
                     const u8 *event, u8 *log)
{
        size_t len;
        size_t pos;
        u32 i;

        pos = offsetof(struct tcg_pcr_event2, pcr_index);
        put_unaligned_le32(pcr_index, log + pos);
        pos = offsetof(struct tcg_pcr_event2, event_type);
        put_unaligned_le32(event_type, log + pos);
        pos = offsetof(struct tcg_pcr_event2, digests) +
                offsetof(struct tpml_digest_values, count);
        put_unaligned_le32(digest_list->count, log + pos);

        pos = offsetof(struct tcg_pcr_event2, digests) +
                offsetof(struct tpml_digest_values, digests);
        for (i = 0; i < digest_list->count; ++i) {
                u16 hash_alg = digest_list->digests[i].hash_alg;

                len = tpm2_algorithm_to_len(hash_alg);
                if (!len)
                        continue;

                pos += offsetof(struct tpmt_ha, hash_alg);
                put_unaligned_le16(hash_alg, log + pos);
                pos += offsetof(struct tpmt_ha, digest);
                memcpy(log + pos, (u8 *)&digest_list->digests[i].digest, len);
                pos += len;
        }

        put_unaligned_le32(size, log + pos);
        pos += sizeof(u32);
        memcpy(log + pos, event, size);
}

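/*
 * Append an event to the log after checking that it fits in the remaining
 * buffer space; the log position is advanced on success.
 */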
static int tcg2_log_append_check(struct tcg2_event_log *elog, u32 pcr_index,
                                 u32 event_type,
                                 struct tpml_digest_values *digest_list,
                                 u32 size, const u8 *event)
{
        u32 event_size;
        u8 *log;

        event_size = size + tcg2_event_get_size(digest_list);
        if (elog->log_position + event_size > elog->log_size) {
                printf("%s: log too large: %u + %u > %u\n", __func__,
                       elog->log_position, event_size, elog->log_size);
                return -ENOBUFS;
        }

        log = elog->log + elog->log_position;
        elog->log_position += event_size;

        tcg2_log_append(pcr_index, event_type, digest_list, size, event, log);

        return 0;
}

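/*
 * Write the Spec ID (EV_NO_ACTION) header event that starts a crypto-agile
 * event log, advertising one digest size per active PCR bank.
 */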
static int tcg2_log_init(struct udevice *dev, struct tcg2_event_log *elog)
{
        struct tcg_efi_spec_id_event *ev;
        struct tcg_pcr_event *log;
        u32 event_size;
        u32 count = 0;
        u32 log_size;
        u32 active;
        size_t i;
        u16 len;
        int rc;

        rc = tcg2_get_active_pcr_banks(dev, &active);
        if (rc)
                return rc;

        event_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes);
        for (i = 0; i < ARRAY_SIZE(hash_algo_list); ++i) {
                if (!(active & hash_algo_list[i].hash_mask))
                        continue;

                switch (hash_algo_list[i].hash_alg) {
                case TPM2_ALG_SHA1:
                case TPM2_ALG_SHA256:
                case TPM2_ALG_SHA384:
                case TPM2_ALG_SHA512:
                        count++;
                        break;
                default:
                        continue;
                }
        }

        /* The +1 accounts for the trailing vendor_info_size byte. */
        event_size += 1 +
                (sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count);
        log_size = offsetof(struct tcg_pcr_event, event) + event_size;

        if (log_size > elog->log_size) {
                printf("%s: log too large: %u > %u\n", __func__, log_size,
                       elog->log_size);
                return -ENOBUFS;
        }

        log = (struct tcg_pcr_event *)elog->log;
        put_unaligned_le32(0, &log->pcr_index);
        put_unaligned_le32(EV_NO_ACTION, &log->event_type);
        memset(&log->digest, 0, sizeof(log->digest));
        put_unaligned_le32(event_size, &log->event_size);

        ev = (struct tcg_efi_spec_id_event *)log->event;
        strlcpy((char *)ev->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
                sizeof(ev->signature));
        put_unaligned_le32(0, &ev->platform_class);
        ev->spec_version_minor = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2;
        ev->spec_version_major = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2;
        ev->spec_errata = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_ERRATA_TPM2;
        ev->uintn_size = sizeof(size_t) / sizeof(u32);
        put_unaligned_le32(count, &ev->number_of_algorithms);

        count = 0;
        for (i = 0; i < ARRAY_SIZE(hash_algo_list); ++i) {
                if (!(active & hash_algo_list[i].hash_mask))
                        continue;

                len = hash_algo_list[i].hash_len;
                if (!len)
                        continue;

                put_unaligned_le16(hash_algo_list[i].hash_alg,
                                   &ev->digest_sizes[count].algorithm_id);
                put_unaligned_le16(len, &ev->digest_sizes[count].digest_size);
                count++;
        }

        /* Zero the trailing vendor_info_size byte. */
        *((u8 *)ev + (event_size - 1)) = 0;
        elog->log_position = log_size;

        return 0;
}

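/*
 * Walk the entries of a discovered event log, validating their framing,
 * and re-extend the recorded digests into the PCRs when @digest_list is
 * non-empty (i.e. when the PCRs were found to be unextended).
 */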
static int tcg2_replay_eventlog(struct tcg2_event_log *elog,
                                struct udevice *dev,
                                struct tpml_digest_values *digest_list,
                                u32 log_position)
{
        const u32 offset = offsetof(struct tcg_pcr_event2, digests) +
                offsetof(struct tpml_digest_values, digests);
        u32 event_size;
        u32 count;
        u16 algo;
        u32 pcr;
        u32 pos;
        u16 len;
        u8 *log;
        int rc;
        u32 i;

        while (log_position + offset < elog->log_size) {
                log = elog->log + log_position;

                pos = offsetof(struct tcg_pcr_event2, pcr_index);
                pcr = get_unaligned_le32(log + pos);
                pos = offsetof(struct tcg_pcr_event2, event_type);
                if (!get_unaligned_le32(log + pos))
                        return 0;

                pos = offsetof(struct tcg_pcr_event2, digests) +
                        offsetof(struct tpml_digest_values, count);
                count = get_unaligned_le32(log + pos);
                if (count > ARRAY_SIZE(hash_algo_list) ||
                    (digest_list->count && digest_list->count != count))
                        return 0;

                pos = offsetof(struct tcg_pcr_event2, digests) +
                        offsetof(struct tpml_digest_values, digests);
                for (i = 0; i < count; ++i) {
                        pos += offsetof(struct tpmt_ha, hash_alg);
                        if (log_position + pos + sizeof(u16) >= elog->log_size)
                                return 0;

                        algo = get_unaligned_le16(log + pos);
                        pos += offsetof(struct tpmt_ha, digest);
                        switch (algo) {
                        case TPM2_ALG_SHA1:
                        case TPM2_ALG_SHA256:
                        case TPM2_ALG_SHA384:
                        case TPM2_ALG_SHA512:
                                len = tpm2_algorithm_to_len(algo);
                                break;
                        default:
                                return 0;
                        }

                        if (digest_list->count) {
                                if (algo != digest_list->digests[i].hash_alg ||
                                    log_position + pos + len >= elog->log_size)
                                        return 0;

                                memcpy(digest_list->digests[i].digest.sha512,
                                       log + pos, len);
                        }

                        pos += len;
                }

                if (log_position + pos + sizeof(u32) >= elog->log_size)
                        return 0;

                event_size = get_unaligned_le32(log + pos);
                pos += event_size + sizeof(u32);
                if (log_position + pos > elog->log_size)
                        return 0;

                if (digest_list->count) {
                        rc = tcg2_pcr_extend(dev, pcr, digest_list);
                        if (rc)
                                return rc;
                }

                log_position += pos;
        }

        elog->log_position = log_position;
        elog->found = true;
        return 0;
}

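/*
 * Validate a pre-existing event log: check the Spec ID header event, make
 * sure the algorithms it advertises match the active PCR banks, then replay
 * the log into the PCRs unless PCR0 shows they have already been extended.
 */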
static int tcg2_log_parse(struct udevice *dev, struct tcg2_event_log *elog)
{
        struct tpml_digest_values digest_list;
        struct tcg_efi_spec_id_event *event;
        struct tcg_pcr_event *log;
        u32 log_active;
        u32 calc_size;
        u32 active;
        u32 count;
        u32 evsz;
        u32 mask;
        u16 algo;
        u16 len;
        int rc;
        u32 i;
        u16 j;

        if (elog->log_size <= offsetof(struct tcg_pcr_event, event))
                return 0;

        log = (struct tcg_pcr_event *)elog->log;
        if (get_unaligned_le32(&log->pcr_index) != 0 ||
            get_unaligned_le32(&log->event_type) != EV_NO_ACTION)
                return 0;

        for (i = 0; i < sizeof(log->digest); i++) {
                if (log->digest[i])
                        return 0;
        }

        evsz = get_unaligned_le32(&log->event_size);
        if (evsz < offsetof(struct tcg_efi_spec_id_event, digest_sizes) ||
            evsz + offsetof(struct tcg_pcr_event, event) > elog->log_size)
                return 0;

        event = (struct tcg_efi_spec_id_event *)log->event;
        if (memcmp(event->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
                   sizeof(TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03)))
                return 0;

        if (event->spec_version_minor != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2 ||
            event->spec_version_major != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2)
                return 0;

        count = get_unaligned_le32(&event->number_of_algorithms);
        if (count > ARRAY_SIZE(hash_algo_list))
                return 0;

        calc_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes) +
                (sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count) +
                1;
        if (evsz != calc_size)
                return 0;

        rc = tcg2_get_active_pcr_banks(dev, &active);
        if (rc)
                return rc;

        digest_list.count = 0;
        log_active = 0;

        for (i = 0; i < count; ++i) {
                algo = get_unaligned_le16(&event->digest_sizes[i].algorithm_id);
                mask = tcg2_algorithm_to_mask(algo);

                if (!(active & mask))
                        return 0;

                switch (algo) {
                case TPM2_ALG_SHA1:
                case TPM2_ALG_SHA256:
                case TPM2_ALG_SHA384:
                case TPM2_ALG_SHA512:
                        len = get_unaligned_le16(&event->digest_sizes[i].digest_size);
                        if (tpm2_algorithm_to_len(algo) != len)
                                return 0;
                        digest_list.digests[digest_list.count++].hash_alg = algo;
                        break;
                default:
                        return 0;
                }

                log_active |= mask;
        }

        /* Ensure the previous firmware extended all the PCRs. */
        if (log_active != active)
                return 0;

        /* Read PCR0 to check if previous firmware extended the PCRs or not. */
        rc = tcg2_pcr_read(dev, 0, &digest_list);
        if (rc)
                return rc;

        for (i = 0; i < digest_list.count; ++i) {
                len = tpm2_algorithm_to_len(digest_list.digests[i].hash_alg);
                for (j = 0; j < len; ++j) {
                        if (digest_list.digests[i].digest.sha512[j])
                                break;
                }

                /* PCR is non-zero; it has been extended, so skip extending. */
                if (j != len) {
                        digest_list.count = 0;
                        break;
                }
        }

        return tcg2_replay_eventlog(elog, dev, &digest_list,
                                    offsetof(struct tcg_pcr_event, event) +
                                    evsz);
}

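/**
 * tcg2_pcr_extend() - extend a PCR with every digest in the list
 * @dev:		TPM device
 * @pcr_index:		index of the PCR to extend
 * @digest_list:	digests, one per active bank
 *
 * Return: 0 on success, a TPM return code otherwise
 */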
int tcg2_pcr_extend(struct udevice *dev, u32 pcr_index,
                    struct tpml_digest_values *digest_list)
{
        u32 rc;
        u32 i;

        for (i = 0; i < digest_list->count; i++) {
                u32 alg = digest_list->digests[i].hash_alg;

                rc = tpm2_pcr_extend(dev, pcr_index, alg,
                                     (u8 *)&digest_list->digests[i].digest,
                                     tpm2_algorithm_to_len(alg));
                if (rc) {
                        printf("%s: error pcr:%u alg:%08x\n", __func__,
                               pcr_index, alg);
                        return rc;
                }
        }

        return 0;
}

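/**
 * tcg2_pcr_read() - read a PCR once per bank listed in the digest list
 * @dev:		TPM device
 * @pcr_index:		index of the PCR to read
 * @digest_list:	list whose hash_alg fields select the banks to read
 *
 * Return: 0 on success, an error code otherwise
 */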
int tcg2_pcr_read(struct udevice *dev, u32 pcr_index,
                  struct tpml_digest_values *digest_list)
{
        struct tpm_chip_priv *priv;
        u32 rc;
        u32 i;

        priv = dev_get_uclass_priv(dev);
        if (!priv)
                return -ENODEV;

        for (i = 0; i < digest_list->count; i++) {
                u32 alg = digest_list->digests[i].hash_alg;
                u8 *digest = (u8 *)&digest_list->digests[i].digest;

                rc = tpm2_pcr_read(dev, pcr_index, priv->pcr_select_min, alg,
                                   digest, tpm2_algorithm_to_len(alg), NULL);
                if (rc) {
                        printf("%s: error pcr:%u alg:%08x\n", __func__,
                               pcr_index, alg);
                        return rc;
                }
        }

        return 0;
}

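/**
 * tcg2_measure_data() - hash data, extend the PCR and append a log entry
 * @dev:	TPM device
 * @elog:	event log
 * @pcr_index:	PCR to extend
 * @size:	length of @data
 * @data:	data to measure, or NULL to measure @event instead
 * @event_type:	type of the event
 * @event_size:	length of @event
 * @event:	event data recorded in the log
 *
 * Return: 0 on success, an error code otherwise
 */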
int tcg2_measure_data(struct udevice *dev, struct tcg2_event_log *elog,
                      u32 pcr_index, u32 size, const u8 *data, u32 event_type,
                      u32 event_size, const u8 *event)
{
        struct tpml_digest_values digest_list;
        int rc;

        if (data)
                rc = tcg2_create_digest(dev, data, size, &digest_list);
        else
                rc = tcg2_create_digest(dev, event, event_size, &digest_list);
        if (rc)
                return rc;

        rc = tcg2_pcr_extend(dev, pcr_index, &digest_list);
        if (rc)
                return rc;

        return tcg2_log_append_check(elog, pcr_index, event_type, &digest_list,
                                     event_size, event);
}

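/*
 * Locate the platform event log buffer and either adopt a log discovered
 * there, copy it into the caller's buffer, or initialize a fresh log.
 */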
int tcg2_log_prepare_buffer(struct udevice *dev, struct tcg2_event_log *elog,
                            bool ignore_existing_log)
{
        struct tcg2_event_log log;
        int rc;

        elog->log_position = 0;
        elog->found = false;

        rc = tcg2_platform_get_log(dev, (void **)&log.log, &log.log_size);
        if (!rc) {
                log.log_position = 0;
                log.found = false;

                if (!ignore_existing_log) {
                        rc = tcg2_log_parse(dev, &log);
                        if (rc)
                                return rc;
                }

                if (elog->log_size) {
                        if (log.found) {
                                if (elog->log_size < log.log_position)
                                        return -ENOBUFS;

                                /*
                                 * Copy the discovered log into the user buffer
                                 * if there's enough space.
                                 */
                                memcpy(elog->log, log.log, log.log_position);
                        }

                        unmap_physmem(log.log, MAP_NOCACHE);
                } else {
                        elog->log = log.log;
                        elog->log_size = log.log_size;
                }

                elog->log_position = log.log_position;
                elog->found = log.found;
        }

        /*
         * Initialize the log buffer if no log was discovered and the buffer is
         * valid. Users can pass in their own buffer as a fallback if no
         * memory region is found.
         */
        if (!elog->found && elog->log_size)
                rc = tcg2_log_init(dev, elog);

        return rc;
}

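/*
 * Find the TPMv2 device, start it up, prepare the event log and measure the
 * U-Boot version string into PCR 0 as the S-CRTM version event.
 */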
int tcg2_measurement_init(struct udevice **dev, struct tcg2_event_log *elog,
                          bool ignore_existing_log)
{
        int rc;

        rc = tcg2_platform_get_tpm2(dev);
        if (rc)
                return rc;

        rc = tpm_auto_start(*dev);
        if (rc)
                return rc;

        rc = tcg2_log_prepare_buffer(*dev, elog, ignore_existing_log);
        if (rc) {
                tcg2_measurement_term(*dev, elog, true);
                return rc;
        }

        rc = tcg2_measure_event(*dev, elog, 0, EV_S_CRTM_VERSION,
                                strlen(version_string) + 1,
                                (u8 *)version_string);
        if (rc) {
                tcg2_measurement_term(*dev, elog, true);
                return rc;
        }

        return 0;
}

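/*
 * Close out the measurements by writing an EV_SEPARATOR event to PCRs 0-7
 * (0x1 on error, 0xffffffff otherwise), then release the log buffer.
 */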
void tcg2_measurement_term(struct udevice *dev, struct tcg2_event_log *elog,
                           bool error)
{
        u32 event = error ? 0x1 : 0xffffffff;
        int i;

        for (i = 0; i < 8; ++i)
                tcg2_measure_event(dev, elog, i, EV_SEPARATOR, sizeof(event),
                                   (const u8 *)&event);

        if (elog->log)
                unmap_physmem(elog->log, MAP_NOCACHE);
}

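/*
 * Default implementation: look up the event log buffer in the device tree,
 * first via the tpm_event_log_addr/size (or linux,sml-base/size) properties,
 * falling back to a "memory-region" phandle.
 */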
__weak int tcg2_platform_get_log(struct udevice *dev, void **addr, u32 *size)
{
        const __be32 *addr_prop;
        const __be32 *size_prop;
        int asize;
        int ssize;

        *addr = NULL;
        *size = 0;

        addr_prop = dev_read_prop(dev, "tpm_event_log_addr", &asize);
        if (!addr_prop)
                addr_prop = dev_read_prop(dev, "linux,sml-base", &asize);

        size_prop = dev_read_prop(dev, "tpm_event_log_size", &ssize);
        if (!size_prop)
                size_prop = dev_read_prop(dev, "linux,sml-size", &ssize);

        if (addr_prop && size_prop) {
                u64 a = of_read_number(addr_prop, asize / sizeof(__be32));
                u64 s = of_read_number(size_prop, ssize / sizeof(__be32));

                *addr = map_physmem(a, s, MAP_NOCACHE);
                *size = (u32)s;
        } else {
                struct ofnode_phandle_args args;
                phys_addr_t a;
                fdt_size_t s;

                if (dev_read_phandle_with_args(dev, "memory-region", NULL, 0,
                                               0, &args))
                        return -ENODEV;

                a = ofnode_get_addr_size(args.node, "reg", &s);
                if (a == FDT_ADDR_T_NONE)
                        return -ENOMEM;

                *addr = map_physmem(a, s, MAP_NOCACHE);
                *size = (u32)s;
        }

        return 0;
}

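/* Default implementation: return the first TPMv2 device found. */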
__weak int tcg2_platform_get_tpm2(struct udevice **dev)
{
        for_each_tpm_device(*dev) {
                if (tpm_get_version(*dev) == TPM_V2)
                        return 0;
        }

        return -ENODEV;
}

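/**
 * tcg2_algorithm_to_mask() - get the bank bitmask for a TPM2 hash algorithm
 * @algo:	algorithm to look up
 *
 * Return: the algorithm's bank mask, or 0 if it is not supported
 */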
u32 tcg2_algorithm_to_mask(enum tpm2_algorithms algo)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(hash_algo_list); i++) {
                if (hash_algo_list[i].hash_alg == algo)
                        return hash_algo_list[i].hash_mask;
        }

        return 0;
}

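/* Default no-op; platforms may override to handle TPM startup errors. */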
__weak void tcg2_platform_startup_error(struct udevice *dev, int rc) {}