blob: a4e28d1da273d25aca95168fbaee416e16b0c080 [file] [log] [blame]
Andrew F. Davisa513b2a2018-05-04 19:06:09 +00001/*
2 * Texas Instruments System Control Interface Driver
3 * Based on Linux and U-Boot implementation
4 *
5 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
6 *
7 * SPDX-License-Identifier: BSD-3-Clause
8 */
9
Andrew F. Davisa513b2a2018-05-04 19:06:09 +000010#include <errno.h>
Andrew F. Davisa513b2a2018-05-04 19:06:09 +000011#include <stdbool.h>
12#include <stddef.h>
13#include <string.h>
14
Antonio Nino Diaze0f90632018-12-14 00:18:21 +000015#include <platform_def.h>
16
17#include <common/debug.h>
Andrew F. Davisa513b2a2018-05-04 19:06:09 +000018#include <sec_proxy.h>
19
20#include "ti_sci_protocol.h"
21#include "ti_sci.h"
22
/**
 * struct ti_sci_desc - Description of SoC integration
 * @host_id: Host identifier representing the compute entity; written into
 *           the header of every outgoing message (see ti_sci_setup_one_xfer)
 * @max_msg_size: Maximum size of data per message that can be handled;
 *                used to bound both transmit and receive buffer sizes
 */
struct ti_sci_desc {
	uint8_t host_id;
	int max_msg_size;
};
32
/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @desc: SoC description for this instance (const: fixed at compile time)
 * @seq: Seq id used for verification for tx and rx message; incremented
 *       before each transmit and checked against the response header
 */
struct ti_sci_info {
	const struct ti_sci_desc desc;
	uint8_t seq;
};
42
/*
 * Single driver-private instance. @seq starts at an arbitrary non-zero
 * value and wraps naturally within uint8_t range as messages are sent.
 * TI_SCI_HOST_ID and TI_SCI_MAX_MESSAGE_SIZE are platform-supplied
 * (presumably via platform_def.h — confirm against the platform port).
 */
static struct ti_sci_info info = {
	.desc = {
		.host_id = TI_SCI_HOST_ID,
		.max_msg_size = TI_SCI_MAX_MESSAGE_SIZE,
	},
	.seq = 0x0a,
};
50
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message: Transmit message (request handed to the secure proxy)
 * @rx_message: Receive message (buffer the secure proxy fills with the
 *              firmware response)
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	struct k3_sec_proxy_msg rx_message;
};
60
61/**
62 * ti_sci_setup_one_xfer() - Setup one message type
63 *
64 * @msg_type: Message type
65 * @msg_flags: Flag to set for the message
66 * @tx_buf: Buffer to be sent to mailbox channel
67 * @tx_message_size: transmit message size
68 * @rx_buf: Buffer to be received from mailbox channel
69 * @rx_message_size: receive message size
70 *
71 * Helper function which is used by various command functions that are
72 * exposed to clients of this driver for allocating a message traffic event.
73 *
74 * Return: 0 if all goes well, else appropriate error message
75 */
76static int ti_sci_setup_one_xfer(uint16_t msg_type, uint32_t msg_flags,
77 void *tx_buf,
78 size_t tx_message_size,
79 void *rx_buf,
80 size_t rx_message_size,
81 struct ti_sci_xfer *xfer)
82{
83 struct ti_sci_msg_hdr *hdr;
84
85 /* Ensure we have sane transfer sizes */
86 if (rx_message_size > info.desc.max_msg_size ||
87 tx_message_size > info.desc.max_msg_size ||
88 rx_message_size < sizeof(*hdr) ||
89 tx_message_size < sizeof(*hdr))
90 return -ERANGE;
91
92 info.seq++;
93
94 hdr = (struct ti_sci_msg_hdr *)tx_buf;
95 hdr->seq = info.seq;
96 hdr->type = msg_type;
97 hdr->host = info.desc.host_id;
98 hdr->flags = msg_flags;
99
100 xfer->tx_message.buf = tx_buf;
101 xfer->tx_message.len = tx_message_size;
102
103 xfer->rx_message.buf = rx_buf;
104 xfer->rx_message.len = rx_message_size;
105
106 return 0;
107}
108
109/**
110 * ti_sci_get_response() - Receive response from mailbox channel
111 *
112 * @xfer: Transfer to initiate and wait for response
113 * @chan: Channel to receive the response
114 *
115 * Return: 0 if all goes well, else appropriate error message
116 */
117static inline int ti_sci_get_response(struct ti_sci_xfer *xfer,
118 enum k3_sec_proxy_chan_id chan)
119{
120 struct k3_sec_proxy_msg *msg = &xfer->rx_message;
121 struct ti_sci_msg_hdr *hdr;
122 int ret;
123
124 /* Receive the response */
125 ret = k3_sec_proxy_recv(chan, msg);
126 if (ret) {
127 ERROR("Message receive failed (%d)\n", ret);
128 return ret;
129 }
130
131 /* msg is updated by Secure Proxy driver */
132 hdr = (struct ti_sci_msg_hdr *)msg->buf;
133
134 /* Sanity check for message response */
135 if (hdr->seq != info.seq) {
136 ERROR("Message for %d is not expected\n", hdr->seq);
137 return -EINVAL;
138 }
139
140 if (msg->len > info.desc.max_msg_size) {
141 ERROR("Unable to handle %lu xfer (max %d)\n",
142 msg->len, info.desc.max_msg_size);
143 return -EINVAL;
144 }
145
146 return 0;
147}
148
149/**
150 * ti_sci_do_xfer() - Do one transfer
151 *
152 * @xfer: Transfer to initiate and wait for response
153 *
154 * Return: 0 if all goes well, else appropriate error message
155 */
156static inline int ti_sci_do_xfer(struct ti_sci_xfer *xfer)
157{
158 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
159 int ret;
160
Andrew F. Davis18d371d2019-01-04 12:49:16 -0600161 /* Clear any spurious messages in receive queue */
162 ret = k3_sec_proxy_clear_rx_thread(SP_RESPONSE);
163 if (ret) {
164 ERROR("Could not clear response queue (%d)\n", ret);
165 return ret;
166 }
167
Andrew F. Davisa513b2a2018-05-04 19:06:09 +0000168 /* Send the message */
169 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, msg);
170 if (ret) {
171 ERROR("Message sending failed (%d)\n", ret);
172 return ret;
173 }
174
Andrew F. Davis18d371d2019-01-04 12:49:16 -0600175 /* Get the response */
Andrew F. Davisa513b2a2018-05-04 19:06:09 +0000176 ret = ti_sci_get_response(xfer, SP_RESPONSE);
177 if (ret) {
178 ERROR("Failed to get response (%d)\n", ret);
179 return ret;
180 }
181
182 return 0;
183}
184
185/**
186 * ti_sci_get_revision() - Get the revision of the SCI entity
187 *
188 * Updates the SCI information in the internal data structure.
189 *
190 * Return: 0 if all goes well, else appropriate error message
191 */
192int ti_sci_get_revision(struct ti_sci_msg_resp_version *rev_info)
193{
194 struct ti_sci_msg_hdr hdr;
195 struct ti_sci_xfer xfer;
196 int ret;
197
198 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_VERSION, 0x0,
199 &hdr, sizeof(hdr),
200 rev_info, sizeof(*rev_info),
201 &xfer);
202 if (ret) {
203 ERROR("Message alloc failed (%d)\n", ret);
204 return ret;
205 }
206
207 ret = ti_sci_do_xfer(&xfer);
208 if (ret) {
209 ERROR("Transfer send failed (%d)\n", ret);
210 return ret;
211 }
212
213 return 0;
214}
215
216/**
Andrew F. Davis4f2a0552018-05-04 19:06:10 +0000217 * ti_sci_is_response_ack() - Generic ACK/NACK message check
218 *
219 * @r: pointer to response buffer
220 *
221 * Return: true if the response was an ACK, else returns false
222 */
223static inline bool ti_sci_is_response_ack(void *r)
224{
225 struct ti_sci_msg_hdr *hdr = r;
226
227 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
228}
229
230/**
231 * ti_sci_device_set_state() - Set device state
232 *
233 * @id: Device identifier
234 * @flags: flags to setup for the device
235 * @state: State to move the device to
236 *
237 * Return: 0 if all goes well, else appropriate error message
238 */
Andrew F. Davis8335bc22019-02-11 12:55:25 -0600239static int ti_sci_device_set_state(uint32_t id, uint32_t flags, uint8_t state)
Andrew F. Davis4f2a0552018-05-04 19:06:10 +0000240{
241 struct ti_sci_msg_req_set_device_state req;
242 struct ti_sci_msg_hdr resp;
243
244 struct ti_sci_xfer xfer;
245 int ret;
246
247 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE,
248 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
249 &req, sizeof(req),
250 &resp, sizeof(resp),
251 &xfer);
252 if (ret) {
253 ERROR("Message alloc failed (%d)\n", ret);
254 return ret;
255 }
256
257 req.id = id;
258 req.state = state;
259
260 ret = ti_sci_do_xfer(&xfer);
261 if (ret) {
262 ERROR("Transfer send failed (%d)\n", ret);
263 return ret;
264 }
265
266 if (!ti_sci_is_response_ack(&resp))
267 return -ENODEV;
268
269 return 0;
270}
271
272/**
273 * ti_sci_device_get_state() - Get device state
274 *
275 * @id: Device Identifier
276 * @clcnt: Pointer to Context Loss Count
277 * @resets: pointer to resets
278 * @p_state: pointer to p_state
279 * @c_state: pointer to c_state
280 *
281 * Return: 0 if all goes well, else appropriate error message
282 */
Andrew F. Davis8335bc22019-02-11 12:55:25 -0600283static int ti_sci_device_get_state(uint32_t id, uint32_t *clcnt,
284 uint32_t *resets, uint8_t *p_state,
285 uint8_t *c_state)
Andrew F. Davis4f2a0552018-05-04 19:06:10 +0000286{
287 struct ti_sci_msg_req_get_device_state req;
288 struct ti_sci_msg_resp_get_device_state resp;
289
290 struct ti_sci_xfer xfer;
291 int ret;
292
293 if (!clcnt && !resets && !p_state && !c_state)
294 return -EINVAL;
295
296 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_DEVICE_STATE, 0,
297 &req, sizeof(req),
298 &resp, sizeof(resp),
299 &xfer);
300 if (ret) {
301 ERROR("Message alloc failed (%d)\n", ret);
302 return ret;
303 }
304
305 req.id = id;
306
307 ret = ti_sci_do_xfer(&xfer);
308 if (ret) {
309 ERROR("Transfer send failed (%d)\n", ret);
310 return ret;
311 }
312
313 if (!ti_sci_is_response_ack(&resp))
314 return -ENODEV;
315
316 if (clcnt)
317 *clcnt = resp.context_loss_count;
318 if (resets)
319 *resets = resp.resets;
320 if (p_state)
321 *p_state = resp.programmed_state;
322 if (c_state)
323 *c_state = resp.current_state;
324
325 return 0;
326}
327
328/**
329 * ti_sci_device_get() - Request for device managed by TISCI
330 *
331 * @id: Device Identifier
332 *
333 * Request for the device - NOTE: the client MUST maintain integrity of
334 * usage count by balancing get_device with put_device. No refcounting is
335 * managed by driver for that purpose.
336 *
Andrew F. Davis4f2a0552018-05-04 19:06:10 +0000337 * Return: 0 if all goes well, else appropriate error message
338 */
339int ti_sci_device_get(uint32_t id)
340{
Andrew F. Davisfbe6c062019-02-11 12:58:32 -0600341 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_ON);
342}
343
344/**
345 * ti_sci_device_get_exclusive() - Exclusive request for device managed by TISCI
346 *
347 * @id: Device Identifier
348 *
349 * Request for the device - NOTE: the client MUST maintain integrity of
350 * usage count by balancing get_device with put_device. No refcounting is
351 * managed by driver for that purpose.
352 *
353 * NOTE: This _exclusive version of the get API is for exclusive access to the
354 * device. Any other host in the system will fail to get this device after this
355 * call until exclusive access is released with device_put or a non-exclusive
356 * set call.
357 *
358 * Return: 0 if all goes well, else appropriate error message
359 */
360int ti_sci_device_get_exclusive(uint32_t id)
361{
Andrew F. Davis4f2a0552018-05-04 19:06:10 +0000362 return ti_sci_device_set_state(id,
363 MSG_FLAG_DEVICE_EXCLUSIVE,
364 MSG_DEVICE_SW_STATE_ON);
365}
366
367/**
368 * ti_sci_device_idle() - Idle a device managed by TISCI
369 *
370 * @id: Device Identifier
371 *
372 * Request for the device - NOTE: the client MUST maintain integrity of
373 * usage count by balancing get_device with put_device. No refcounting is
374 * managed by driver for that purpose.
375 *
376 * Return: 0 if all goes well, else appropriate error message
377 */
378int ti_sci_device_idle(uint32_t id)
379{
Andrew F. Davisfbe6c062019-02-11 12:58:32 -0600380 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_RETENTION);
381}
382
383/**
384 * ti_sci_device_idle_exclusive() - Exclusive idle a device managed by TISCI
385 *
386 * @id: Device Identifier
387 *
388 * Request for the device - NOTE: the client MUST maintain integrity of
389 * usage count by balancing get_device with put_device. No refcounting is
390 * managed by driver for that purpose.
391 *
392 * NOTE: This _exclusive version of the idle API is for exclusive access to
393 * the device. Any other host in the system will fail to get this device after
394 * this call until exclusive access is released with device_put or a
395 * non-exclusive set call.
396 *
397 * Return: 0 if all goes well, else appropriate error message
398 */
399int ti_sci_device_idle_exclusive(uint32_t id)
400{
Andrew F. Davis4f2a0552018-05-04 19:06:10 +0000401 return ti_sci_device_set_state(id,
402 MSG_FLAG_DEVICE_EXCLUSIVE,
403 MSG_DEVICE_SW_STATE_RETENTION);
404}
405
406/**
407 * ti_sci_device_put() - Release a device managed by TISCI
408 *
409 * @id: Device Identifier
410 *
411 * Request for the device - NOTE: the client MUST maintain integrity of
412 * usage count by balancing get_device with put_device. No refcounting is
413 * managed by driver for that purpose.
414 *
415 * Return: 0 if all goes well, else appropriate error message
416 */
417int ti_sci_device_put(uint32_t id)
418{
419 return ti_sci_device_set_state(id, 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
420}
421
/**
 * ti_sci_device_is_valid() - Is the device valid
 *
 * @id: Device Identifier
 *
 * Return: 0 if all goes well and the device ID is valid, else return
 *         appropriate error
 */
int ti_sci_device_is_valid(uint32_t id)
{
	uint8_t discard;

	/* Querying the state doubles as an existence check for the ID */
	return ti_sci_device_get_state(id, NULL, NULL, NULL, &discard);
}
437
/**
 * ti_sci_device_get_clcnt() - Get context loss counter
 *
 * @id: Device Identifier
 * @count: Pointer to Context Loss counter to populate
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_get_clcnt(uint32_t id, uint32_t *count)
{
	/* Only the context loss count is of interest here */
	return ti_sci_device_get_state(id, count, NULL, NULL, NULL);
}
450
451/**
452 * ti_sci_device_is_idle() - Check if the device is requested to be idle
453 *
454 * @id: Device Identifier
455 * @r_state: true if requested to be idle
456 *
457 * Return: 0 if all goes well, else appropriate error message
458 */
459int ti_sci_device_is_idle(uint32_t id, bool *r_state)
460{
461 int ret;
462 uint8_t state;
463
464 if (!r_state)
465 return -EINVAL;
466
467 ret = ti_sci_device_get_state(id, NULL, NULL, &state, NULL);
468 if (ret)
469 return ret;
470
471 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
472
473 return 0;
474}
475
476/**
477 * ti_sci_device_is_stop() - Check if the device is requested to be stopped
478 *
479 * @id: Device Identifier
480 * @r_state: true if requested to be stopped
481 * @curr_state: true if currently stopped
482 *
483 * Return: 0 if all goes well, else appropriate error message
484 */
485int ti_sci_device_is_stop(uint32_t id, bool *r_state, bool *curr_state)
486{
487 int ret;
488 uint8_t p_state, c_state;
489
490 if (!r_state && !curr_state)
491 return -EINVAL;
492
493 ret = ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
494 if (ret)
495 return ret;
496
497 if (r_state)
498 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
499 if (curr_state)
500 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
501
502 return 0;
503}
504
505/**
506 * ti_sci_device_is_on() - Check if the device is requested to be ON
507 *
508 * @id: Device Identifier
509 * @r_state: true if requested to be ON
510 * @curr_state: true if currently ON and active
511 *
512 * Return: 0 if all goes well, else appropriate error message
513 */
514int ti_sci_device_is_on(uint32_t id, bool *r_state, bool *curr_state)
515{
516 int ret;
517 uint8_t p_state, c_state;
518
519 if (!r_state && !curr_state)
520 return -EINVAL;
521
522 ret =
523 ti_sci_device_get_state(id, NULL, NULL, &p_state, &c_state);
524 if (ret)
525 return ret;
526
527 if (r_state)
528 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
529 if (curr_state)
530 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
531
532 return 0;
533}
534
535/**
536 * ti_sci_device_is_trans() - Check if the device is currently transitioning
537 *
538 * @id: Device Identifier
539 * @curr_state: true if currently transitioning
540 *
541 * Return: 0 if all goes well, else appropriate error message
542 */
543int ti_sci_device_is_trans(uint32_t id, bool *curr_state)
544{
545 int ret;
546 uint8_t state;
547
548 if (!curr_state)
549 return -EINVAL;
550
551 ret = ti_sci_device_get_state(id, NULL, NULL, NULL, &state);
552 if (ret)
553 return ret;
554
555 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
556
557 return 0;
558}
559
560/**
561 * ti_sci_device_set_resets() - Set resets for device managed by TISCI
562 *
563 * @id: Device Identifier
564 * @reset_state: Device specific reset bit field
565 *
566 * Return: 0 if all goes well, else appropriate error message
567 */
568int ti_sci_device_set_resets(uint32_t id, uint32_t reset_state)
569{
570 struct ti_sci_msg_req_set_device_resets req;
571 struct ti_sci_msg_hdr resp;
572
573 struct ti_sci_xfer xfer;
574 int ret;
575
576 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_RESETS,
577 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
578 &req, sizeof(req),
579 &resp, sizeof(resp),
580 &xfer);
581 if (ret) {
582 ERROR("Message alloc failed (%d)\n", ret);
583 return ret;
584 }
585
586 req.id = id;
587 req.resets = reset_state;
588
589 ret = ti_sci_do_xfer(&xfer);
590 if (ret) {
591 ERROR("Transfer send failed (%d)\n", ret);
592 return ret;
593 }
594
595 if (!ti_sci_is_response_ack(&resp))
596 return -ENODEV;
597
598 return 0;
599}
600
/**
 * ti_sci_device_get_resets() - Get reset state for device managed by TISCI
 *
 * @id: Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all goes well, else appropriate error message
 */
int ti_sci_device_get_resets(uint32_t id, uint32_t *reset_state)
{
	/* Only the resets bit field is of interest here */
	return ti_sci_device_get_state(id, NULL, reset_state, NULL, NULL);
}
613
614/**
Andrew F. Davisdc08adf2018-05-04 19:06:11 +0000615 * ti_sci_clock_set_state() - Set clock state helper
616 *
617 * @dev_id: Device identifier this request is for
618 * @clk_id: Clock identifier for the device for this request,
619 * Each device has its own set of clock inputs, This indexes
620 * which clock input to modify
621 * @flags: Header flags as needed
622 * @state: State to request for the clock
623 *
624 * Return: 0 if all goes well, else appropriate error message
625 */
626int ti_sci_clock_set_state(uint32_t dev_id, uint8_t clk_id,
627 uint32_t flags, uint8_t state)
628{
629 struct ti_sci_msg_req_set_clock_state req;
630 struct ti_sci_msg_hdr resp;
631
632 struct ti_sci_xfer xfer;
633 int ret;
634
635 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_STATE,
636 flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
637 &req, sizeof(req),
638 &resp, sizeof(resp),
639 &xfer);
640 if (ret) {
641 ERROR("Message alloc failed (%d)\n", ret);
642 return ret;
643 }
644
645 req.dev_id = dev_id;
646 req.clk_id = clk_id;
647 req.request_state = state;
648
649 ret = ti_sci_do_xfer(&xfer);
650 if (ret) {
651 ERROR("Transfer send failed (%d)\n", ret);
652 return ret;
653 }
654
655 if (!ti_sci_is_response_ack(&resp))
656 return -ENODEV;
657
658 return 0;
659}
660
661/**
662 * ti_sci_clock_get_state() - Get clock state helper
663 *
664 * @dev_id: Device identifier this request is for
665 * @clk_id: Clock identifier for the device for this request.
666 * Each device has its own set of clock inputs. This indexes
667 * which clock input to modify.
668 * @programmed_state: State requested for clock to move to
669 * @current_state: State that the clock is currently in
670 *
671 * Return: 0 if all goes well, else appropriate error message
672 */
673int ti_sci_clock_get_state(uint32_t dev_id, uint8_t clk_id,
674 uint8_t *programmed_state,
675 uint8_t *current_state)
676{
677 struct ti_sci_msg_req_get_clock_state req;
678 struct ti_sci_msg_resp_get_clock_state resp;
679
680 struct ti_sci_xfer xfer;
681 int ret;
682
683 if (!programmed_state && !current_state)
684 return -EINVAL;
685
686 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_STATE,
687 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
688 &req, sizeof(req),
689 &resp, sizeof(resp),
690 &xfer);
691 if (ret) {
692 ERROR("Message alloc failed (%d)\n", ret);
693 return ret;
694 }
695
696 req.dev_id = dev_id;
697 req.clk_id = clk_id;
698
699 ret = ti_sci_do_xfer(&xfer);
700 if (ret) {
701 ERROR("Transfer send failed (%d)\n", ret);
702 return ret;
703 }
704
705 if (!ti_sci_is_response_ack(&resp))
706 return -ENODEV;
707
708 if (programmed_state)
709 *programmed_state = resp.programmed_state;
710 if (current_state)
711 *current_state = resp.current_state;
712
713 return 0;
714}
715
716/**
717 * ti_sci_clock_get() - Get control of a clock from TI SCI
718
719 * @dev_id: Device identifier this request is for
720 * @clk_id: Clock identifier for the device for this request.
721 * Each device has its own set of clock inputs. This indexes
722 * which clock input to modify.
723 * @needs_ssc: 'true' iff Spread Spectrum clock is desired
724 * @can_change_freq: 'true' iff frequency change is desired
725 * @enable_input_term: 'true' iff input termination is desired
726 *
727 * Return: 0 if all goes well, else appropriate error message
728 */
729int ti_sci_clock_get(uint32_t dev_id, uint8_t clk_id,
730 bool needs_ssc, bool can_change_freq,
731 bool enable_input_term)
732{
733 uint32_t flags = 0;
734
735 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
736 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
737 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
738
739 return ti_sci_clock_set_state(dev_id, clk_id, flags,
740 MSG_CLOCK_SW_STATE_REQ);
741}
742
743/**
744 * ti_sci_clock_idle() - Idle a clock which is in our control
745
746 * @dev_id: Device identifier this request is for
747 * @clk_id: Clock identifier for the device for this request.
748 * Each device has its own set of clock inputs. This indexes
749 * which clock input to modify.
750 *
751 * NOTE: This clock must have been requested by get_clock previously.
752 *
753 * Return: 0 if all goes well, else appropriate error message
754 */
755int ti_sci_clock_idle(uint32_t dev_id, uint8_t clk_id)
756{
757 return ti_sci_clock_set_state(dev_id, clk_id, 0,
758 MSG_CLOCK_SW_STATE_UNREQ);
759}
760
761/**
762 * ti_sci_clock_put() - Release a clock from our control
763 *
764 * @dev_id: Device identifier this request is for
765 * @clk_id: Clock identifier for the device for this request.
766 * Each device has its own set of clock inputs. This indexes
767 * which clock input to modify.
768 *
769 * NOTE: This clock must have been requested by get_clock previously.
770 *
771 * Return: 0 if all goes well, else appropriate error message
772 */
773int ti_sci_clock_put(uint32_t dev_id, uint8_t clk_id)
774{
775 return ti_sci_clock_set_state(dev_id, clk_id, 0,
776 MSG_CLOCK_SW_STATE_AUTO);
777}
778
779/**
780 * ti_sci_clock_is_auto() - Is the clock being auto managed
781 *
782 * @dev_id: Device identifier this request is for
783 * @clk_id: Clock identifier for the device for this request.
784 * Each device has its own set of clock inputs. This indexes
785 * which clock input to modify.
786 * @req_state: state indicating if the clock is auto managed
787 *
788 * Return: 0 if all goes well, else appropriate error message
789 */
790int ti_sci_clock_is_auto(uint32_t dev_id, uint8_t clk_id, bool *req_state)
791{
792 uint8_t state = 0;
793 int ret;
794
795 if (!req_state)
796 return -EINVAL;
797
798 ret = ti_sci_clock_get_state(dev_id, clk_id, &state, NULL);
799 if (ret)
800 return ret;
801
802 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
803
804 return 0;
805}
806
807/**
808 * ti_sci_clock_is_on() - Is the clock ON
809 *
810 * @dev_id: Device identifier this request is for
811 * @clk_id: Clock identifier for the device for this request.
812 * Each device has its own set of clock inputs. This indexes
813 * which clock input to modify.
814 * @req_state: state indicating if the clock is managed by us and enabled
815 * @curr_state: state indicating if the clock is ready for operation
816 *
817 * Return: 0 if all goes well, else appropriate error message
818 */
819int ti_sci_clock_is_on(uint32_t dev_id, uint8_t clk_id,
820 bool *req_state, bool *curr_state)
821{
822 uint8_t c_state = 0, r_state = 0;
823 int ret;
824
825 if (!req_state && !curr_state)
826 return -EINVAL;
827
828 ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
829 if (ret)
830 return ret;
831
832 if (req_state)
833 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
834 if (curr_state)
835 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
836
837 return 0;
838}
839
840/**
841 * ti_sci_clock_is_off() - Is the clock OFF
842 *
843 * @dev_id: Device identifier this request is for
844 * @clk_id: Clock identifier for the device for this request.
845 * Each device has its own set of clock inputs. This indexes
846 * which clock input to modify.
847 * @req_state: state indicating if the clock is managed by us and disabled
848 * @curr_state: state indicating if the clock is NOT ready for operation
849 *
850 * Return: 0 if all goes well, else appropriate error message
851 */
852int ti_sci_clock_is_off(uint32_t dev_id, uint8_t clk_id,
853 bool *req_state, bool *curr_state)
854{
855 uint8_t c_state = 0, r_state = 0;
856 int ret;
857
858 if (!req_state && !curr_state)
859 return -EINVAL;
860
861 ret = ti_sci_clock_get_state(dev_id, clk_id, &r_state, &c_state);
862 if (ret)
863 return ret;
864
865 if (req_state)
866 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
867 if (curr_state)
868 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
869
870 return 0;
871}
872
873/**
874 * ti_sci_clock_set_parent() - Set the clock source of a specific device clock
875 *
876 * @dev_id: Device identifier this request is for
877 * @clk_id: Clock identifier for the device for this request.
878 * Each device has its own set of clock inputs. This indexes
879 * which clock input to modify.
880 * @parent_id: Parent clock identifier to set
881 *
882 * Return: 0 if all goes well, else appropriate error message
883 */
884int ti_sci_clock_set_parent(uint32_t dev_id, uint8_t clk_id, uint8_t parent_id)
885{
886 struct ti_sci_msg_req_set_clock_parent req;
887 struct ti_sci_msg_hdr resp;
888
889 struct ti_sci_xfer xfer;
890 int ret;
891
892 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_PARENT,
893 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
894 &req, sizeof(req),
895 &resp, sizeof(resp),
896 &xfer);
897 if (ret) {
898 ERROR("Message alloc failed (%d)\n", ret);
899 return ret;
900 }
901
902 req.dev_id = dev_id;
903 req.clk_id = clk_id;
904 req.parent_id = parent_id;
905
906 ret = ti_sci_do_xfer(&xfer);
907 if (ret) {
908 ERROR("Transfer send failed (%d)\n", ret);
909 return ret;
910 }
911
912 if (!ti_sci_is_response_ack(&resp))
913 return -ENODEV;
914
915 return 0;
916}
917
918/**
919 * ti_sci_clock_get_parent() - Get current parent clock source
920 *
921 * @dev_id: Device identifier this request is for
922 * @clk_id: Clock identifier for the device for this request.
923 * Each device has its own set of clock inputs. This indexes
924 * which clock input to modify.
925 * @parent_id: Current clock parent
926 *
927 * Return: 0 if all goes well, else appropriate error message
928 */
929int ti_sci_clock_get_parent(uint32_t dev_id, uint8_t clk_id, uint8_t *parent_id)
930{
931 struct ti_sci_msg_req_get_clock_parent req;
932 struct ti_sci_msg_resp_get_clock_parent resp;
933
934 struct ti_sci_xfer xfer;
935 int ret;
936
937 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_PARENT,
938 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
939 &req, sizeof(req),
940 &resp, sizeof(resp),
941 &xfer);
942 if (ret) {
943 ERROR("Message alloc failed (%d)\n", ret);
944 return ret;
945 }
946
947 req.dev_id = dev_id;
948 req.clk_id = clk_id;
949
950 ret = ti_sci_do_xfer(&xfer);
951 if (ret) {
952 ERROR("Transfer send failed (%d)\n", ret);
953 return ret;
954 }
955
956 if (!ti_sci_is_response_ack(&resp))
957 return -ENODEV;
958
959 *parent_id = resp.parent_id;
960
961 return 0;
962}
963
964/**
965 * ti_sci_clock_get_num_parents() - Get num parents of the current clk source
966 *
967 * @dev_id: Device identifier this request is for
968 * @clk_id: Clock identifier for the device for this request.
969 * Each device has its own set of clock inputs. This indexes
970 * which clock input to modify.
971 * @num_parents: Returns he number of parents to the current clock.
972 *
973 * Return: 0 if all goes well, else appropriate error message
974 */
975int ti_sci_clock_get_num_parents(uint32_t dev_id, uint8_t clk_id,
976 uint8_t *num_parents)
977{
978 struct ti_sci_msg_req_get_clock_num_parents req;
979 struct ti_sci_msg_resp_get_clock_num_parents resp;
980
981 struct ti_sci_xfer xfer;
982 int ret;
983
984 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
985 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
986 &req, sizeof(req),
987 &resp, sizeof(resp),
988 &xfer);
989 if (ret) {
990 ERROR("Message alloc failed (%d)\n", ret);
991 return ret;
992 }
993
994 req.dev_id = dev_id;
995 req.clk_id = clk_id;
996
997 ret = ti_sci_do_xfer(&xfer);
998 if (ret) {
999 ERROR("Transfer send failed (%d)\n", ret);
1000 return ret;
1001 }
1002
1003 if (!ti_sci_is_response_ack(&resp))
1004 return -ENODEV;
1005
1006 *num_parents = resp.num_parents;
1007
1008 return 0;
1009}
1010
1011/**
1012 * ti_sci_clock_get_match_freq() - Find a good match for frequency
1013 *
1014 * @dev_id: Device identifier this request is for
1015 * @clk_id: Clock identifier for the device for this request.
1016 * Each device has its own set of clock inputs. This indexes
1017 * which clock input to modify.
1018 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1019 * allowable programmed frequency and does not account for clock
1020 * tolerances and jitter.
1021 * @target_freq: The target clock frequency in Hz. A frequency will be
1022 * processed as close to this target frequency as possible.
1023 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1024 * allowable programmed frequency and does not account for clock
1025 * tolerances and jitter.
1026 * @match_freq: Frequency match in Hz response.
1027 *
1028 * Return: 0 if all goes well, else appropriate error message
1029 */
1030int ti_sci_clock_get_match_freq(uint32_t dev_id, uint8_t clk_id,
1031 uint64_t min_freq, uint64_t target_freq,
1032 uint64_t max_freq, uint64_t *match_freq)
1033{
1034 struct ti_sci_msg_req_query_clock_freq req;
1035 struct ti_sci_msg_resp_query_clock_freq resp;
1036
1037 struct ti_sci_xfer xfer;
1038 int ret;
1039
1040 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_QUERY_CLOCK_FREQ,
1041 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1042 &req, sizeof(req),
1043 &resp, sizeof(resp),
1044 &xfer);
1045 if (ret) {
1046 ERROR("Message alloc failed (%d)\n", ret);
1047 return ret;
1048 }
1049
1050 req.dev_id = dev_id;
1051 req.clk_id = clk_id;
1052 req.min_freq_hz = min_freq;
1053 req.target_freq_hz = target_freq;
1054 req.max_freq_hz = max_freq;
1055
1056 ret = ti_sci_do_xfer(&xfer);
1057 if (ret) {
1058 ERROR("Transfer send failed (%d)\n", ret);
1059 return ret;
1060 }
1061
1062 if (!ti_sci_is_response_ack(&resp))
1063 return -ENODEV;
1064
1065 *match_freq = resp.freq_hz;
1066
1067 return 0;
1068}
1069
1070/**
1071 * ti_sci_clock_set_freq() - Set a frequency for clock
1072 *
1073 * @dev_id: Device identifier this request is for
1074 * @clk_id: Clock identifier for the device for this request.
1075 * Each device has its own set of clock inputs. This indexes
1076 * which clock input to modify.
1077 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1078 * allowable programmed frequency and does not account for clock
1079 * tolerances and jitter.
1080 * @target_freq: The target clock frequency in Hz. A frequency will be
1081 * processed as close to this target frequency as possible.
1082 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1083 * allowable programmed frequency and does not account for clock
1084 * tolerances and jitter.
1085 *
1086 * Return: 0 if all goes well, else appropriate error message
1087 */
1088int ti_sci_clock_set_freq(uint32_t dev_id, uint8_t clk_id, uint64_t min_freq,
1089 uint64_t target_freq, uint64_t max_freq)
1090{
1091 struct ti_sci_msg_req_set_clock_freq req;
1092 struct ti_sci_msg_hdr resp;
1093
1094 struct ti_sci_xfer xfer;
1095 int ret;
1096
1097 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_CLOCK_FREQ,
1098 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1099 &req, sizeof(req),
1100 &resp, sizeof(resp),
1101 &xfer);
1102 if (ret) {
1103 ERROR("Message alloc failed (%d)\n", ret);
1104 return ret;
1105 }
1106 req.dev_id = dev_id;
1107 req.clk_id = clk_id;
1108 req.min_freq_hz = min_freq;
1109 req.target_freq_hz = target_freq;
1110 req.max_freq_hz = max_freq;
1111
1112 ret = ti_sci_do_xfer(&xfer);
1113 if (ret) {
1114 ERROR("Transfer send failed (%d)\n", ret);
1115 return ret;
1116 }
1117
1118 if (!ti_sci_is_response_ack(&resp))
1119 return -ENODEV;
1120
1121 return 0;
1122}
1123
1124/**
1125 * ti_sci_clock_get_freq() - Get current frequency
1126 *
1127 * @dev_id: Device identifier this request is for
1128 * @clk_id: Clock identifier for the device for this request.
1129 * Each device has its own set of clock inputs. This indexes
1130 * which clock input to modify.
1131 * @freq: Currently frequency in Hz
1132 *
1133 * Return: 0 if all goes well, else appropriate error message
1134 */
1135int ti_sci_clock_get_freq(uint32_t dev_id, uint8_t clk_id, uint64_t *freq)
1136{
1137 struct ti_sci_msg_req_get_clock_freq req;
1138 struct ti_sci_msg_resp_get_clock_freq resp;
1139
1140 struct ti_sci_xfer xfer;
1141 int ret;
1142
1143 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_GET_CLOCK_FREQ,
1144 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1145 &req, sizeof(req),
1146 &resp, sizeof(resp),
1147 &xfer);
1148 if (ret) {
1149 ERROR("Message alloc failed (%d)\n", ret);
1150 return ret;
1151 }
1152
1153 req.dev_id = dev_id;
1154 req.clk_id = clk_id;
1155
1156 ret = ti_sci_do_xfer(&xfer);
1157 if (ret) {
1158 ERROR("Transfer send failed (%d)\n", ret);
1159 return ret;
1160 }
1161
1162 if (!ti_sci_is_response_ack(&resp))
1163 return -ENODEV;
1164
1165 *freq = resp.freq_hz;
1166
1167 return 0;
1168}
1169
1170/**
Andrew F. Davis0d449302018-05-04 19:06:12 +00001171 * ti_sci_core_reboot() - Command to request system reset
1172 *
1173 * Return: 0 if all goes well, else appropriate error message
1174 */
1175int ti_sci_core_reboot(void)
1176{
1177 struct ti_sci_msg_req_reboot req;
1178 struct ti_sci_msg_hdr resp;
1179
1180 struct ti_sci_xfer xfer;
1181 int ret;
1182
1183 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SYS_RESET,
1184 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1185 &req, sizeof(req),
1186 &resp, sizeof(resp),
1187 &xfer);
1188 if (ret) {
1189 ERROR("Message alloc failed (%d)\n", ret);
1190 return ret;
1191 }
1192
1193 ret = ti_sci_do_xfer(&xfer);
1194 if (ret) {
1195 ERROR("Transfer send failed (%d)\n", ret);
1196 return ret;
1197 }
1198
1199 if (!ti_sci_is_response_ack(&resp))
1200 return -ENODEV;
1201
1202 return 0;
1203}
1204
1205/**
Andrew F. Davisd92fdfb2018-05-04 19:06:13 +00001206 * ti_sci_proc_request() - Request a physical processor control
1207 *
1208 * @proc_id: Processor ID this request is for
1209 *
1210 * Return: 0 if all goes well, else appropriate error message
1211 */
1212int ti_sci_proc_request(uint8_t proc_id)
1213{
1214 struct ti_sci_msg_req_proc_request req;
1215 struct ti_sci_msg_hdr resp;
1216
1217 struct ti_sci_xfer xfer;
1218 int ret;
1219
1220 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_REQUEST,
1221 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1222 &req, sizeof(req),
1223 &resp, sizeof(resp),
1224 &xfer);
1225 if (ret) {
1226 ERROR("Message alloc failed (%d)\n", ret);
1227 return ret;
1228 }
1229
1230 req.processor_id = proc_id;
1231
1232 ret = ti_sci_do_xfer(&xfer);
1233 if (ret) {
1234 ERROR("Transfer send failed (%d)\n", ret);
1235 return ret;
1236 }
1237
1238 if (!ti_sci_is_response_ack(&resp))
1239 return -ENODEV;
1240
1241 return 0;
1242}
1243
1244/**
1245 * ti_sci_proc_release() - Release a physical processor control
1246 *
1247 * @proc_id: Processor ID this request is for
1248 *
1249 * Return: 0 if all goes well, else appropriate error message
1250 */
1251int ti_sci_proc_release(uint8_t proc_id)
1252{
1253 struct ti_sci_msg_req_proc_release req;
1254 struct ti_sci_msg_hdr resp;
1255
1256 struct ti_sci_xfer xfer;
1257 int ret;
1258
1259 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_RELEASE,
1260 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1261 &req, sizeof(req),
1262 &resp, sizeof(resp),
1263 &xfer);
1264 if (ret) {
1265 ERROR("Message alloc failed (%d)\n", ret);
1266 return ret;
1267 }
1268
1269 req.processor_id = proc_id;
1270
1271 ret = ti_sci_do_xfer(&xfer);
1272 if (ret) {
1273 ERROR("Transfer send failed (%d)\n", ret);
1274 return ret;
1275 }
1276
1277 if (!ti_sci_is_response_ack(&resp))
1278 return -ENODEV;
1279
1280 return 0;
1281}
1282
1283/**
1284 * ti_sci_proc_handover() - Handover a physical processor control to a host in
1285 * the processor's access control list.
1286 *
1287 * @proc_id: Processor ID this request is for
1288 * @host_id: Host ID to get the control of the processor
1289 *
1290 * Return: 0 if all goes well, else appropriate error message
1291 */
1292int ti_sci_proc_handover(uint8_t proc_id, uint8_t host_id)
1293{
1294 struct ti_sci_msg_req_proc_handover req;
1295 struct ti_sci_msg_hdr resp;
1296
1297 struct ti_sci_xfer xfer;
1298 int ret;
1299
1300 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_HANDOVER,
1301 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1302 &req, sizeof(req),
1303 &resp, sizeof(resp),
1304 &xfer);
1305 if (ret) {
1306 ERROR("Message alloc failed (%d)\n", ret);
1307 return ret;
1308 }
1309
1310 req.processor_id = proc_id;
1311 req.host_id = host_id;
1312
1313 ret = ti_sci_do_xfer(&xfer);
1314 if (ret) {
1315 ERROR("Transfer send failed (%d)\n", ret);
1316 return ret;
1317 }
1318
1319 if (!ti_sci_is_response_ack(&resp))
1320 return -ENODEV;
1321
1322 return 0;
1323}
1324
1325/**
1326 * ti_sci_proc_set_boot_cfg() - Set the processor boot configuration flags
1327 *
1328 * @proc_id: Processor ID this request is for
1329 * @config_flags_set: Configuration flags to be set
1330 * @config_flags_clear: Configuration flags to be cleared
1331 *
1332 * Return: 0 if all goes well, else appropriate error message
1333 */
1334int ti_sci_proc_set_boot_cfg(uint8_t proc_id, uint64_t bootvector,
1335 uint32_t config_flags_set,
1336 uint32_t config_flags_clear)
1337{
1338 struct ti_sci_msg_req_set_proc_boot_config req;
1339 struct ti_sci_msg_hdr resp;
1340
1341 struct ti_sci_xfer xfer;
1342 int ret;
1343
1344 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CONFIG,
1345 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1346 &req, sizeof(req),
1347 &resp, sizeof(resp),
1348 &xfer);
1349 if (ret) {
1350 ERROR("Message alloc failed (%d)\n", ret);
1351 return ret;
1352 }
1353
1354 req.processor_id = proc_id;
1355 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1356 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1357 TISCI_ADDR_HIGH_SHIFT;
1358 req.config_flags_set = config_flags_set;
1359 req.config_flags_clear = config_flags_clear;
1360
1361 ret = ti_sci_do_xfer(&xfer);
1362 if (ret) {
1363 ERROR("Transfer send failed (%d)\n", ret);
1364 return ret;
1365 }
1366
1367 if (!ti_sci_is_response_ack(&resp))
1368 return -ENODEV;
1369
1370 return 0;
1371}
1372
1373/**
1374 * ti_sci_proc_set_boot_ctrl() - Set the processor boot control flags
1375 *
1376 * @proc_id: Processor ID this request is for
1377 * @control_flags_set: Control flags to be set
1378 * @control_flags_clear: Control flags to be cleared
1379 *
1380 * Return: 0 if all goes well, else appropriate error message
1381 */
1382int ti_sci_proc_set_boot_ctrl(uint8_t proc_id, uint32_t control_flags_set,
1383 uint32_t control_flags_clear)
1384{
1385 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1386 struct ti_sci_msg_hdr resp;
1387
1388 struct ti_sci_xfer xfer;
1389 int ret;
1390
1391 ret = ti_sci_setup_one_xfer(TISCI_MSG_SET_PROC_BOOT_CTRL,
1392 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1393 &req, sizeof(req),
1394 &resp, sizeof(resp),
1395 &xfer);
1396 if (ret) {
1397 ERROR("Message alloc failed (%d)\n", ret);
1398 return ret;
1399 }
1400
1401 req.processor_id = proc_id;
1402 req.control_flags_set = control_flags_set;
1403 req.control_flags_clear = control_flags_clear;
1404
1405 ret = ti_sci_do_xfer(&xfer);
1406 if (ret) {
1407 ERROR("Transfer send failed (%d)\n", ret);
1408 return ret;
1409 }
1410
1411 if (!ti_sci_is_response_ack(&resp))
1412 return -ENODEV;
1413
1414 return 0;
1415}
1416
1417/**
1418 * ti_sci_proc_auth_boot_image() - Authenticate and load image and then set the
1419 * processor configuration flags
1420 *
1421 * @proc_id: Processor ID this request is for
1422 * @cert_addr: Memory address at which payload image certificate is located
1423 *
1424 * Return: 0 if all goes well, else appropriate error message
1425 */
1426int ti_sci_proc_auth_boot_image(uint8_t proc_id, uint64_t cert_addr)
1427{
1428 struct ti_sci_msg_req_proc_auth_boot_image req;
1429 struct ti_sci_msg_hdr resp;
1430
1431 struct ti_sci_xfer xfer;
1432 int ret;
1433
1434 ret = ti_sci_setup_one_xfer(TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
1435 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1436 &req, sizeof(req),
1437 &resp, sizeof(resp),
1438 &xfer);
1439 if (ret) {
1440 ERROR("Message alloc failed (%d)\n", ret);
1441 return ret;
1442 }
1443
1444 req.processor_id = proc_id;
1445 req.cert_addr_low = cert_addr & TISCI_ADDR_LOW_MASK;
1446 req.cert_addr_high = (cert_addr & TISCI_ADDR_HIGH_MASK) >>
1447 TISCI_ADDR_HIGH_SHIFT;
1448
1449 ret = ti_sci_do_xfer(&xfer);
1450 if (ret) {
1451 ERROR("Transfer send failed (%d)\n", ret);
1452 return ret;
1453 }
1454
1455 if (!ti_sci_is_response_ack(&resp))
1456 return -ENODEV;
1457
1458 return 0;
1459}
1460
1461/**
1462 * ti_sci_proc_get_boot_status() - Get the processor boot status
1463 *
1464 * @proc_id: Processor ID this request is for
1465 *
1466 * Return: 0 if all goes well, else appropriate error message
1467 */
1468int ti_sci_proc_get_boot_status(uint8_t proc_id, uint64_t *bv,
1469 uint32_t *cfg_flags,
1470 uint32_t *ctrl_flags,
1471 uint32_t *sts_flags)
1472{
1473 struct ti_sci_msg_req_get_proc_boot_status req;
1474 struct ti_sci_msg_resp_get_proc_boot_status resp;
1475
1476 struct ti_sci_xfer xfer;
1477 int ret;
1478
1479 ret = ti_sci_setup_one_xfer(TISCI_MSG_GET_PROC_BOOT_STATUS,
1480 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1481 &req, sizeof(req),
1482 &resp, sizeof(resp),
1483 &xfer);
1484 if (ret) {
1485 ERROR("Message alloc failed (%d)\n", ret);
1486 return ret;
1487 }
1488
1489 req.processor_id = proc_id;
1490
1491 ret = ti_sci_do_xfer(&xfer);
1492 if (ret) {
1493 ERROR("Transfer send failed (%d)\n", ret);
1494 return ret;
1495 }
1496
1497 if (!ti_sci_is_response_ack(&resp))
1498 return -ENODEV;
1499
1500 *bv = (resp.bootvector_low & TISCI_ADDR_LOW_MASK) |
1501 (((uint64_t)resp.bootvector_high << TISCI_ADDR_HIGH_SHIFT) &
1502 TISCI_ADDR_HIGH_MASK);
1503 *cfg_flags = resp.config_flags;
1504 *ctrl_flags = resp.control_flags;
1505 *sts_flags = resp.status_flags;
1506
1507 return 0;
1508}
1509
1510/**
Andrew F. Davisb62cc1e2018-12-18 13:21:12 -06001511 * ti_sci_proc_wait_boot_status() - Wait for a processor boot status
1512 *
1513 * @proc_id: Processor ID this request is for
1514 * @num_wait_iterations Total number of iterations we will check before
1515 * we will timeout and give up
1516 * @num_match_iterations How many iterations should we have continued
1517 * status to account for status bits glitching.
1518 * This is to make sure that match occurs for
1519 * consecutive checks. This implies that the
1520 * worst case should consider that the stable
1521 * time should at the worst be num_wait_iterations
1522 * num_match_iterations to prevent timeout.
1523 * @delay_per_iteration_us Specifies how long to wait (in micro seconds)
1524 * between each status checks. This is the minimum
1525 * duration, and overhead of register reads and
1526 * checks are on top of this and can vary based on
1527 * varied conditions.
1528 * @delay_before_iterations_us Specifies how long to wait (in micro seconds)
1529 * before the very first check in the first
1530 * iteration of status check loop. This is the
1531 * minimum duration, and overhead of register
1532 * reads and checks are.
1533 * @status_flags_1_set_all_wait If non-zero, Specifies that all bits of the
1534 * status matching this field requested MUST be 1.
1535 * @status_flags_1_set_any_wait If non-zero, Specifies that at least one of the
1536 * bits matching this field requested MUST be 1.
1537 * @status_flags_1_clr_all_wait If non-zero, Specifies that all bits of the
1538 * status matching this field requested MUST be 0.
1539 * @status_flags_1_clr_any_wait If non-zero, Specifies that at least one of the
1540 * bits matching this field requested MUST be 0.
1541 *
1542 * Return: 0 if all goes well, else appropriate error message
1543 */
1544int ti_sci_proc_wait_boot_status(uint8_t proc_id, uint8_t num_wait_iterations,
1545 uint8_t num_match_iterations,
1546 uint8_t delay_per_iteration_us,
1547 uint8_t delay_before_iterations_us,
1548 uint32_t status_flags_1_set_all_wait,
1549 uint32_t status_flags_1_set_any_wait,
1550 uint32_t status_flags_1_clr_all_wait,
1551 uint32_t status_flags_1_clr_any_wait)
1552{
1553 struct ti_sci_msg_req_wait_proc_boot_status req;
1554 struct ti_sci_msg_hdr resp;
1555
1556 struct ti_sci_xfer xfer;
1557 int ret;
1558
1559 ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS,
1560 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1561 &req, sizeof(req),
1562 &resp, sizeof(resp),
1563 &xfer);
1564 if (ret) {
1565 ERROR("Message alloc failed (%d)\n", ret);
1566 return ret;
1567 }
1568
1569 req.processor_id = proc_id;
1570 req.num_wait_iterations = num_wait_iterations;
1571 req.num_match_iterations = num_match_iterations;
1572 req.delay_per_iteration_us = delay_per_iteration_us;
1573 req.delay_before_iterations_us = delay_before_iterations_us;
1574 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
1575 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
1576 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
1577 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
1578
1579 ret = ti_sci_do_xfer(&xfer);
1580 if (ret) {
1581 ERROR("Transfer send failed (%d)\n", ret);
1582 return ret;
1583 }
1584
1585 if (!ti_sci_is_response_ack(&resp))
1586 return -ENODEV;
1587
1588 return 0;
1589}
1590
1591/**
Andrew F. Davised8a4f82019-01-03 13:23:52 -06001592 * ti_sci_proc_shutdown() - Shutdown Processor without waiting for ACKs
1593 *
1594 * @proc_id: Processor ID this request is for
1595 * @dev_id: Device identifier this request is for
1596 *
1597 * Return: 0 if all goes well, else appropriate error message
1598 */
1599int ti_sci_proc_shutdown(uint8_t proc_id, uint32_t dev_id)
1600{
1601 struct ti_sci_msg_req_wait_proc_boot_status wait_req;
1602 struct ti_sci_msg_req_set_device_state set_req;
1603 /*
1604 * We will not be waiting for this response, but declare one anyway
1605 * to pass to the setup function so the checks will still pass
1606 */
1607 struct ti_sci_msg_hdr resp;
1608
1609 struct ti_sci_xfer xfer;
1610 int ret;
1611
1612 /* Start by sending wait command */
1613
1614 /* Setup with NORESPONSE flag to keep response queue clean */
1615 ret = ti_sci_setup_one_xfer(TISCI_MSG_WAIT_PROC_BOOT_STATUS,
1616 TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
1617 &wait_req, sizeof(wait_req),
1618 &resp, sizeof(resp),
1619 &xfer);
1620 if (ret) {
1621 ERROR("Message alloc failed (%d)\n", ret);
1622 return ret;
1623 }
1624
1625 wait_req.processor_id = proc_id;
1626 /*
1627 * Wait maximum time to give us the best chance to get
1628 * to WFI before this command timeouts
1629 */
1630 wait_req.delay_before_iterations_us = UINT8_MAX;
1631 wait_req.num_wait_iterations = UINT8_MAX;
1632 wait_req.delay_per_iteration_us = UINT8_MAX; /* TODO: optimize time */
1633 wait_req.num_match_iterations = 2;
1634 wait_req.status_flags_1_set_all_wait = 0;
1635 /* Wait for either WFE or WFI */
1636 wait_req.status_flags_1_set_any_wait = PROC_BOOT_STATUS_FLAG_ARMV8_WFE |
1637 PROC_BOOT_STATUS_FLAG_ARMV8_WFI;
1638 wait_req.status_flags_1_clr_all_wait = 0;
1639 wait_req.status_flags_1_clr_any_wait = 0;
1640
1641 /* Send wait message */
1642 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &xfer.tx_message);
1643 if (ret) {
1644 ERROR("Message sending failed (%d)\n", ret);
1645 return ret;
1646 }
1647
1648 /* Now queue up the shutdown request */
1649 ret = ti_sci_setup_one_xfer(TI_SCI_MSG_SET_DEVICE_STATE,
1650 TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
1651 &set_req, sizeof(set_req),
1652 &resp, sizeof(resp),
1653 &xfer);
1654 if (ret) {
1655 ERROR("Message alloc failed (%d)\n", ret);
1656 return ret;
1657 }
1658
1659 set_req.id = dev_id;
1660 set_req.state = MSG_DEVICE_SW_STATE_AUTO_OFF;
1661
1662 /* Send shutdown message */
1663 ret = k3_sec_proxy_send(SP_HIGH_PRIORITY, &xfer.tx_message);
1664 if (ret) {
1665 ERROR("Message sending failed (%d)\n", ret);
1666 return ret;
1667 }
1668
1669 /* Return without waiting for responses */
1670 return 0;
1671}
1672
1673/**
Andrew F. Davisa513b2a2018-05-04 19:06:09 +00001674 * ti_sci_init() - Basic initialization
1675 *
1676 * Return: 0 if all goes well, else appropriate error message
1677 */
1678int ti_sci_init(void)
1679{
1680 struct ti_sci_msg_resp_version rev_info;
1681 int ret;
1682
1683 ret = ti_sci_get_revision(&rev_info);
1684 if (ret) {
1685 ERROR("Unable to communicate with control firmware (%d)\n", ret);
1686 return ret;
1687 }
1688
1689 INFO("SYSFW ABI: %d.%d (firmware rev 0x%04x '%s')\n",
1690 rev_info.abi_major, rev_info.abi_minor,
1691 rev_info.firmware_revision,
1692 rev_info.firmware_description);
1693
1694 return 0;
1695}