// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2023 Linaro Limited
 */

#include <dm.h>
#include <dm/of_access.h>
#include <tpm_api.h>
#include <tpm-common.h>
#include <tpm-v2.h>
#include <tpm_tcg2.h>
#include <u-boot/sha1.h>
#include <u-boot/sha256.h>
#include <u-boot/sha512.h>
#include <version_string.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/unaligned/be_byteshift.h>
#include <linux/unaligned/generic.h>
#include <linux/unaligned/le_byteshift.h>
#include "tpm-utils.h"

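/*
 * Query the TPM for its supported and active PCR banks. Each recognised
 * bank is folded into a bitmask of hash algorithms; unknown algorithms
 * are reported and skipped.
 */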
int tcg2_get_pcr_info(struct udevice *dev, u32 *supported_pcr, u32 *active_pcr,
		      u32 *pcr_banks)
{
	u8 response[(sizeof(struct tpms_capability_data) -
		     offsetof(struct tpms_capability_data, data))];
	struct tpml_pcr_selection pcrs;
	size_t i;
	u32 ret;

	*supported_pcr = 0;
	*active_pcr = 0;
	*pcr_banks = 0;
	memset(response, 0, sizeof(response));

	ret = tpm2_get_pcr_info(dev, &pcrs);
	if (ret)
		return ret;

	for (i = 0; i < pcrs.count; i++) {
		u32 hash_mask = tcg2_algorithm_to_mask(pcrs.selection[i].hash);

		if (hash_mask) {
			*supported_pcr |= hash_mask;
			if (tpm2_is_active_pcr(&pcrs.selection[i]))
				*active_pcr |= hash_mask;
		} else {
			printf("%s: unknown algorithm %x\n", __func__,
			       pcrs.selection[i].hash);
		}
	}

	*pcr_banks = pcrs.count;

	return 0;
}

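/* Convenience wrapper that returns only the active PCR bank mask. */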
int tcg2_get_active_pcr_banks(struct udevice *dev, u32 *active_pcr_banks)
{
	u32 supported = 0;
	u32 pcr_banks = 0;
	u32 active = 0;
	int rc;

	rc = tcg2_get_pcr_info(dev, &supported, &active, &pcr_banks);
	if (rc)
		return rc;

	*active_pcr_banks = active;

	return 0;
}

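/*
 * Compute the size in bytes a TCG_PCR_EVENT2 record will occupy in the
 * log for the given digest list: the fixed header, one TPMT_HA per
 * digest, and the trailing 32-bit event size field.
 */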
u32 tcg2_event_get_size(struct tpml_digest_values *digest_list)
{
	u32 len;
	size_t i;

	len = offsetof(struct tcg_pcr_event2, digests);
	len += offsetof(struct tpml_digest_values, digests);
	for (i = 0; i < digest_list->count; ++i) {
		u16 l = tpm2_algorithm_to_len(digest_list->digests[i].hash_alg);

		if (!l)
			continue;

		len += l + offsetof(struct tpmt_ha, digest);
	}
	len += sizeof(u32);

	return len;
}

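/*
 * Hash the input buffer once per active PCR bank (SHA-1/256/384/512)
 * and collect the results into a TPML_DIGEST_VALUES list.
 */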
int tcg2_create_digest(struct udevice *dev, const u8 *input, u32 length,
		       struct tpml_digest_values *digest_list)
{
	u8 final[sizeof(union tpmu_ha)];
	sha256_context ctx_256;
	sha512_context ctx_512;
	sha1_context ctx;
	u32 active;
	size_t i;
	u32 len;
	int rc;

	rc = tcg2_get_active_pcr_banks(dev, &active);
	if (rc)
		return rc;

	digest_list->count = 0;
	for (i = 0; i < ARRAY_SIZE(hash_algo_list); ++i) {
		if (!(active & hash_algo_list[i].hash_mask))
			continue;

		switch (hash_algo_list[i].hash_alg) {
		case TPM2_ALG_SHA1:
			sha1_starts(&ctx);
			sha1_update(&ctx, input, length);
			sha1_finish(&ctx, final);
			len = TPM2_SHA1_DIGEST_SIZE;
			break;
		case TPM2_ALG_SHA256:
			sha256_starts(&ctx_256);
			sha256_update(&ctx_256, input, length);
			sha256_finish(&ctx_256, final);
			len = TPM2_SHA256_DIGEST_SIZE;
			break;
		case TPM2_ALG_SHA384:
			sha384_starts(&ctx_512);
			sha384_update(&ctx_512, input, length);
			sha384_finish(&ctx_512, final);
			len = TPM2_SHA384_DIGEST_SIZE;
			break;
		case TPM2_ALG_SHA512:
			sha512_starts(&ctx_512);
			sha512_update(&ctx_512, input, length);
			sha512_finish(&ctx_512, final);
			len = TPM2_SHA512_DIGEST_SIZE;
			break;
		default:
			printf("%s: unsupported algorithm %x\n", __func__,
			       hash_algo_list[i].hash_alg);
			continue;
		}

		digest_list->digests[digest_list->count].hash_alg =
			hash_algo_list[i].hash_alg;
		memcpy(&digest_list->digests[digest_list->count].digest, final,
		       len);
		digest_list->count++;
	}

	return 0;
}

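/*
 * Serialize one TCG_PCR_EVENT2 record into the log buffer using
 * unaligned little-endian stores: PCR index, event type, digest list,
 * event size and the event data itself.
 */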
void tcg2_log_append(u32 pcr_index, u32 event_type,
		     struct tpml_digest_values *digest_list, u32 size,
		     const u8 *event, u8 *log)
{
	size_t len;
	size_t pos;
	u32 i;

	pos = offsetof(struct tcg_pcr_event2, pcr_index);
	put_unaligned_le32(pcr_index, log + pos);
	pos = offsetof(struct tcg_pcr_event2, event_type);
	put_unaligned_le32(event_type, log + pos);
	pos = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, count);
	put_unaligned_le32(digest_list->count, log + pos);

	pos = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, digests);
	for (i = 0; i < digest_list->count; ++i) {
		u16 hash_alg = digest_list->digests[i].hash_alg;

		len = tpm2_algorithm_to_len(hash_alg);
		if (!len)
			continue;

		pos += offsetof(struct tpmt_ha, hash_alg);
		put_unaligned_le16(hash_alg, log + pos);
		pos += offsetof(struct tpmt_ha, digest);
		memcpy(log + pos, (u8 *)&digest_list->digests[i].digest, len);
		pos += len;
	}

	put_unaligned_le32(size, log + pos);
	pos += sizeof(u32);
	memcpy(log + pos, event, size);
}

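/*
 * Append an event to the log after verifying the remaining buffer
 * space; fails with -ENOBUFS rather than overflowing the log.
 */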
static int tcg2_log_append_check(struct tcg2_event_log *elog, u32 pcr_index,
				 u32 event_type,
				 struct tpml_digest_values *digest_list,
				 u32 size, const u8 *event)
{
	u32 event_size;
	u8 *log;

	event_size = size + tcg2_event_get_size(digest_list);
	if (elog->log_position + event_size > elog->log_size) {
		printf("%s: log too large: %u + %u > %u\n", __func__,
		       elog->log_position, event_size, elog->log_size);
		return -ENOBUFS;
	}

	log = elog->log + elog->log_position;
	elog->log_position += event_size;

	tcg2_log_append(pcr_index, event_type, digest_list, size, event, log);

	return 0;
}

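/*
 * Write the initial Spec ID (EV_NO_ACTION) event that identifies the
 * log format and enumerates the digest sizes of all active PCR banks.
 */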
static int tcg2_log_init(struct udevice *dev, struct tcg2_event_log *elog)
{
	struct tcg_efi_spec_id_event *ev;
	struct tcg_pcr_event *log;
	u32 event_size;
	u32 count = 0;
	u32 log_size;
	u32 active;
	size_t i;
	u16 len;
	int rc;

	rc = tcg2_get_active_pcr_banks(dev, &active);
	if (rc)
		return rc;

	event_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes);
	for (i = 0; i < ARRAY_SIZE(hash_algo_list); ++i) {
		if (!(active & hash_algo_list[i].hash_mask))
			continue;

		switch (hash_algo_list[i].hash_alg) {
		case TPM2_ALG_SHA1:
		case TPM2_ALG_SHA256:
		case TPM2_ALG_SHA384:
		case TPM2_ALG_SHA512:
			count++;
			break;
		default:
			continue;
		}
	}

	event_size += 1 +
		(sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count);
	log_size = offsetof(struct tcg_pcr_event, event) + event_size;

	if (log_size > elog->log_size) {
		printf("%s: log too large: %u > %u\n", __func__, log_size,
		       elog->log_size);
		return -ENOBUFS;
	}

	log = (struct tcg_pcr_event *)elog->log;
	put_unaligned_le32(0, &log->pcr_index);
	put_unaligned_le32(EV_NO_ACTION, &log->event_type);
	memset(&log->digest, 0, sizeof(log->digest));
	put_unaligned_le32(event_size, &log->event_size);

	ev = (struct tcg_efi_spec_id_event *)log->event;
	strlcpy((char *)ev->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
		sizeof(ev->signature));
	put_unaligned_le32(0, &ev->platform_class);
	ev->spec_version_minor = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2;
	ev->spec_version_major = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2;
	ev->spec_errata = TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_ERRATA_TPM2;
	ev->uintn_size = sizeof(size_t) / sizeof(u32);
	put_unaligned_le32(count, &ev->number_of_algorithms);

	count = 0;
	for (i = 0; i < ARRAY_SIZE(hash_algo_list); ++i) {
		if (!(active & hash_algo_list[i].hash_mask))
			continue;

		len = hash_algo_list[i].hash_len;
		if (!len)
			continue;

		put_unaligned_le16(hash_algo_list[i].hash_alg,
				   &ev->digest_sizes[count].algorithm_id);
		put_unaligned_le16(len, &ev->digest_sizes[count].digest_size);
		count++;
	}

	*((u8 *)ev + (event_size - 1)) = 0;
	elog->log_position = log_size;

	return 0;
}

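/*
 * Walk the events recorded in a discovered log and, if a digest list is
 * provided, re-extend each event into the PCRs. Parsing stops at the
 * first malformed or out-of-bounds record.
 */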
static int tcg2_replay_eventlog(struct tcg2_event_log *elog,
				struct udevice *dev,
				struct tpml_digest_values *digest_list,
				u32 log_position)
{
	const u32 offset = offsetof(struct tcg_pcr_event2, digests) +
		offsetof(struct tpml_digest_values, digests);
	u32 event_size;
	u32 count;
	u16 algo;
	u32 pcr;
	u32 pos;
	u16 len;
	u8 *log;
	int rc;
	u32 i;

	while (log_position + offset < elog->log_size) {
		log = elog->log + log_position;

		pos = offsetof(struct tcg_pcr_event2, pcr_index);
		pcr = get_unaligned_le32(log + pos);
		pos = offsetof(struct tcg_pcr_event2, event_type);
		if (!get_unaligned_le32(log + pos))
			return 0;

		pos = offsetof(struct tcg_pcr_event2, digests) +
			offsetof(struct tpml_digest_values, count);
		count = get_unaligned_le32(log + pos);
		if (count > ARRAY_SIZE(hash_algo_list) ||
		    (digest_list->count && digest_list->count != count))
			return 0;

		pos = offsetof(struct tcg_pcr_event2, digests) +
			offsetof(struct tpml_digest_values, digests);
		for (i = 0; i < count; ++i) {
			pos += offsetof(struct tpmt_ha, hash_alg);
			if (log_position + pos + sizeof(u16) >= elog->log_size)
				return 0;

			algo = get_unaligned_le16(log + pos);
			pos += offsetof(struct tpmt_ha, digest);
			switch (algo) {
			case TPM2_ALG_SHA1:
			case TPM2_ALG_SHA256:
			case TPM2_ALG_SHA384:
			case TPM2_ALG_SHA512:
				len = tpm2_algorithm_to_len(algo);
				break;
			default:
				return 0;
			}

			if (digest_list->count) {
				if (algo != digest_list->digests[i].hash_alg ||
				    log_position + pos + len >= elog->log_size)
					return 0;

				memcpy(digest_list->digests[i].digest.sha512,
				       log + pos, len);
			}

			pos += len;
		}

		if (log_position + pos + sizeof(u32) >= elog->log_size)
			return 0;

		event_size = get_unaligned_le32(log + pos);
		pos += event_size + sizeof(u32);
		if (log_position + pos > elog->log_size)
			return 0;

		if (digest_list->count) {
			rc = tcg2_pcr_extend(dev, pcr, digest_list);
			if (rc)
				return rc;
		}

		log_position += pos;
	}

	elog->log_position = log_position;
	elog->found = true;
	return 0;
}

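/*
 * Validate a pre-existing event log: check the Spec ID header, match
 * its algorithms against the active PCR banks, and replay the log into
 * the PCRs unless PCR0 shows that earlier firmware already extended
 * them.
 */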
static int tcg2_log_parse(struct udevice *dev, struct tcg2_event_log *elog)
{
	struct tpml_digest_values digest_list;
	struct tcg_efi_spec_id_event *event;
	struct tcg_pcr_event *log;
	u32 log_active;
	u32 calc_size;
	u32 active;
	u32 count;
	u32 evsz;
	u32 mask;
	u16 algo;
	u16 len;
	int rc;
	u32 i;
	u16 j;

	if (elog->log_size <= offsetof(struct tcg_pcr_event, event))
		return 0;

	log = (struct tcg_pcr_event *)elog->log;
	if (get_unaligned_le32(&log->pcr_index) != 0 ||
	    get_unaligned_le32(&log->event_type) != EV_NO_ACTION)
		return 0;

	for (i = 0; i < sizeof(log->digest); i++) {
		if (log->digest[i])
			return 0;
	}

	evsz = get_unaligned_le32(&log->event_size);
	if (evsz < offsetof(struct tcg_efi_spec_id_event, digest_sizes) ||
	    evsz + offsetof(struct tcg_pcr_event, event) > elog->log_size)
		return 0;

	event = (struct tcg_efi_spec_id_event *)log->event;
	if (memcmp(event->signature, TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03,
		   sizeof(TCG_EFI_SPEC_ID_EVENT_SIGNATURE_03)))
		return 0;

	if (event->spec_version_minor != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MINOR_TPM2 ||
	    event->spec_version_major != TCG_EFI_SPEC_ID_EVENT_SPEC_VERSION_MAJOR_TPM2)
		return 0;

	count = get_unaligned_le32(&event->number_of_algorithms);
	if (count > ARRAY_SIZE(hash_algo_list))
		return 0;

	calc_size = offsetof(struct tcg_efi_spec_id_event, digest_sizes) +
		(sizeof(struct tcg_efi_spec_id_event_algorithm_size) * count) +
		1;
	if (evsz != calc_size)
		return 0;

	rc = tcg2_get_active_pcr_banks(dev, &active);
	if (rc)
		return rc;

	digest_list.count = 0;
	log_active = 0;

	for (i = 0; i < count; ++i) {
		algo = get_unaligned_le16(&event->digest_sizes[i].algorithm_id);
		mask = tcg2_algorithm_to_mask(algo);

		if (!(active & mask))
			return 0;

		switch (algo) {
		case TPM2_ALG_SHA1:
		case TPM2_ALG_SHA256:
		case TPM2_ALG_SHA384:
		case TPM2_ALG_SHA512:
			len = get_unaligned_le16(&event->digest_sizes[i].digest_size);
			if (tpm2_algorithm_to_len(algo) != len)
				return 0;
			digest_list.digests[digest_list.count++].hash_alg = algo;
			break;
		default:
			return 0;
		}

		log_active |= mask;
	}

	/* Ensure the previous firmware extended all the PCRs. */
	if (log_active != active)
		return 0;

	/* Read PCR0 to check if previous firmware extended the PCRs or not. */
	rc = tcg2_pcr_read(dev, 0, &digest_list);
	if (rc)
		return rc;

	for (i = 0; i < digest_list.count; ++i) {
		len = tpm2_algorithm_to_len(digest_list.digests[i].hash_alg);
		for (j = 0; j < len; ++j) {
			if (digest_list.digests[i].digest.sha512[j])
				break;
		}

		/* PCR is non-zero; it has been extended, so skip extending. */
		if (j != len) {
			digest_list.count = 0;
			break;
		}
	}

	return tcg2_replay_eventlog(elog, dev, &digest_list,
				    offsetof(struct tcg_pcr_event, event) +
				    evsz);
}

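/* Extend a PCR with every digest in the list, one bank at a time. */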
int tcg2_pcr_extend(struct udevice *dev, u32 pcr_index,
		    struct tpml_digest_values *digest_list)
{
	u32 rc;
	u32 i;

	for (i = 0; i < digest_list->count; i++) {
		u32 alg = digest_list->digests[i].hash_alg;

		rc = tpm2_pcr_extend(dev, pcr_index, alg,
				     (u8 *)&digest_list->digests[i].digest,
				     tpm2_algorithm_to_len(alg));
		if (rc) {
			printf("%s: error pcr:%u alg:%08x\n", __func__,
			       pcr_index, alg);
			return rc;
		}
	}

	return 0;
}

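/* Read a PCR back into each digest slot of the list, one bank at a time. */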
int tcg2_pcr_read(struct udevice *dev, u32 pcr_index,
		  struct tpml_digest_values *digest_list)
{
	struct tpm_chip_priv *priv;
	u32 rc;
	u32 i;

	priv = dev_get_uclass_priv(dev);
	if (!priv)
		return -ENODEV;

	for (i = 0; i < digest_list->count; i++) {
		u32 alg = digest_list->digests[i].hash_alg;
		u8 *digest = (u8 *)&digest_list->digests[i].digest;

		rc = tpm2_pcr_read(dev, pcr_index, priv->pcr_select_min, alg,
				   digest, tpm2_algorithm_to_len(alg), NULL);
		if (rc) {
			printf("%s: error pcr:%u alg:%08x\n", __func__,
			       pcr_index, alg);
			return rc;
		}
	}

	return 0;
}

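/*
 * Measure data (or, when data is NULL, the event itself): hash it for
 * all active banks, extend the PCR, then record the event in the log.
 */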
int tcg2_measure_data(struct udevice *dev, struct tcg2_event_log *elog,
		      u32 pcr_index, u32 size, const u8 *data, u32 event_type,
		      u32 event_size, const u8 *event)
{
	struct tpml_digest_values digest_list;
	int rc;

	if (data)
		rc = tcg2_create_digest(dev, data, size, &digest_list);
	else
		rc = tcg2_create_digest(dev, event, event_size, &digest_list);
	if (rc)
		return rc;

	rc = tcg2_pcr_extend(dev, pcr_index, &digest_list);
	if (rc)
		return rc;

	return tcg2_log_append_check(elog, pcr_index, event_type, &digest_list,
				     event_size, event);
}

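/*
 * Locate the platform log buffer and either adopt an existing log found
 * there, copy it into the caller's buffer, or initialize a fresh log.
 */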
int tcg2_log_prepare_buffer(struct udevice *dev, struct tcg2_event_log *elog,
			    bool ignore_existing_log)
{
	struct tcg2_event_log log;
	int rc;

	elog->log_position = 0;
	elog->found = false;

	rc = tcg2_platform_get_log(dev, (void **)&log.log, &log.log_size);
	if (!rc) {
		log.log_position = 0;
		log.found = false;

		if (!ignore_existing_log) {
			rc = tcg2_log_parse(dev, &log);
			if (rc)
				return rc;
		}

		if (elog->log_size) {
			if (log.found) {
				if (elog->log_size < log.log_position)
					return -ENOBUFS;

				/*
				 * Copy the discovered log into the user buffer
				 * if there's enough space.
				 */
				memcpy(elog->log, log.log, log.log_position);
			}

			unmap_physmem(log.log, MAP_NOCACHE);
		} else {
			elog->log = log.log;
			elog->log_size = log.log_size;
		}

		elog->log_position = log.log_position;
		elog->found = log.found;
	}

	/*
	 * Initialize the log buffer if no log was discovered and the buffer is
	 * valid. Users can pass in their own buffer as a fallback if no
	 * memory region is found.
	 */
	if (!elog->found && elog->log_size)
		rc = tcg2_log_init(dev, elog);

	return rc;
}

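/*
 * Find the TPMv2 device, start it, prepare the event log and record the
 * U-Boot version string as the EV_S_CRTM_VERSION measurement in PCR0.
 */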
int tcg2_measurement_init(struct udevice **dev, struct tcg2_event_log *elog,
			  bool ignore_existing_log)
{
	int rc;

	rc = tcg2_platform_get_tpm2(dev);
	if (rc)
		return rc;

	rc = tpm_auto_start(*dev);
	if (rc)
		return rc;

	rc = tcg2_log_prepare_buffer(*dev, elog, ignore_existing_log);
	if (rc) {
		tcg2_measurement_term(*dev, elog, true);
		return rc;
	}

	rc = tcg2_measure_event(*dev, elog, 0, EV_S_CRTM_VERSION,
				strlen(version_string) + 1,
				(u8 *)version_string);
	if (rc) {
		tcg2_measurement_term(*dev, elog, true);
		return rc;
	}

	return 0;
}

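/*
 * Cap PCRs 0-7 with EV_SEPARATOR events (0x1 on error, 0xffffffff
 * otherwise) and release the log buffer mapping.
 */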
void tcg2_measurement_term(struct udevice *dev, struct tcg2_event_log *elog,
			   bool error)
{
	u32 event = error ? 0x1 : 0xffffffff;
	int i;

	for (i = 0; i < 8; ++i)
		tcg2_measure_event(dev, elog, i, EV_SEPARATOR, sizeof(event),
				   (const u8 *)&event);

	if (elog->log)
		unmap_physmem(elog->log, MAP_NOCACHE);
}

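/*
 * Default log discovery: try the tpm_event_log_addr/size properties (or
 * their linux,sml-* equivalents), falling back to a memory-region
 * phandle. Boards can override this weak implementation.
 */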
__weak int tcg2_platform_get_log(struct udevice *dev, void **addr, u32 *size)
{
	const __be32 *addr_prop;
	const __be32 *size_prop;
	int asize;
	int ssize;

	*addr = NULL;
	*size = 0;

	addr_prop = dev_read_prop(dev, "tpm_event_log_addr", &asize);
	if (!addr_prop)
		addr_prop = dev_read_prop(dev, "linux,sml-base", &asize);

	size_prop = dev_read_prop(dev, "tpm_event_log_size", &ssize);
	if (!size_prop)
		size_prop = dev_read_prop(dev, "linux,sml-size", &ssize);

	if (addr_prop && size_prop) {
		u64 a = of_read_number(addr_prop, asize / sizeof(__be32));
		u64 s = of_read_number(size_prop, ssize / sizeof(__be32));

		*addr = map_physmem(a, s, MAP_NOCACHE);
		*size = (u32)s;
	} else {
		struct ofnode_phandle_args args;
		phys_addr_t a;
		fdt_size_t s;

		if (dev_read_phandle_with_args(dev, "memory-region", NULL, 0,
					       0, &args))
			return -ENODEV;

		a = ofnode_get_addr_size(args.node, "reg", &s);
		if (a == FDT_ADDR_T_NONE)
			return -ENOMEM;

		*addr = map_physmem(a, s, MAP_NOCACHE);
		*size = (u32)s;
	}

	return 0;
}

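/* Default TPM lookup: return the first TPMv2 device found. */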
__weak int tcg2_platform_get_tpm2(struct udevice **dev)
{
	for_each_tpm_device(*dev) {
		if (tpm_get_version(*dev) == TPM_V2)
			return 0;
	}

	return -ENODEV;
}

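/* Map a TPM2 hash algorithm ID to its bank bitmask, or 0 if unknown. */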
u32 tcg2_algorithm_to_mask(enum tpm2_algorithms algo)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(hash_algo_list); i++) {
		if (hash_algo_list[i].hash_alg == algo)
			return hash_algo_list[i].hash_mask;
	}

	return 0;
}

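/* Hook for boards to react to TPM startup failures; default is a no-op. */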
__weak void tcg2_platform_startup_error(struct udevice *dev, int rc) {}