blob: 303aa6a631147168b260428bceba294c1417c160 [file] [log] [blame]
Lokesh Vutla5af02db2018-08-27 15:57:32 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments System Control Interface Protocol Driver
4 * Based on drivers/firmware/ti_sci.c from Linux.
5 *
6 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
7 * Lokesh Vutla <lokeshvutla@ti.com>
8 */
9
10#include <common.h>
11#include <dm.h>
12#include <errno.h>
13#include <mailbox.h>
14#include <dm/device.h>
Grygorii Strashkod64c5b22019-02-05 17:31:21 +053015#include <linux/compat.h>
Lokesh Vutla5af02db2018-08-27 15:57:32 +053016#include <linux/err.h>
17#include <linux/soc/ti/k3-sec-proxy.h>
18#include <linux/soc/ti/ti_sci_protocol.h>
19
20#include "ti_sci.h"
21
/*
 * List of all TI SCI devices active in system; entries are struct
 * ti_sci_info instances linked through their @list member.
 */
static LIST_HEAD(ti_sci_list);
24
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message: Transmit message (buffer pointer and length handed to the
 *              secure proxy mailbox)
 * @rx_len: Expected receive message length in bytes
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	u8 rx_len;
};
34
/**
 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
 *				management representation of dev_ids.
 * @dev_id:	TISCI device ID
 * @type:	Corresponding id as identified by TISCI RM.
 *
 * Note: This is used only as a work around for using RM range apis
 *	for AM654 SoC. For future SoCs dev_id will be used as type
 *	for RM range APIs. In order to maintain ABI backward compatibility
 *	type is not being changed for AM654 SoC.
 */
struct ti_sci_rm_type_map {
	u32 dev_id;
	u16 type;
};
50
/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:	Maximum number of messages that can be pending
 *		simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 * @rm_type_map:	RM resource type mapping structure (NULL-terminated
 *			table; may be NULL when dev_id is used directly).
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
	struct ti_sci_rm_type_map *rm_type_map;
};
67
/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @handle:	Instance of TI SCI handle to send to clients.
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @chan_notify: Notification mailbox channel (not used in the code visible
 *		here — presumably for asynchronous TISCI notifications; confirm
 *		against the probe/driver code)
 * @xfer:	xfer info (single shared transfer buffer; one message in
 *		flight at a time)
 * @list:	list head for linking into ti_sci_list
 * @is_secure:	Determines if the communication is through secure threads.
 * @host_id:	Host identifier representing the compute entity
 * @seq:	Seq id used for verification for tx and rx message.
 */
struct ti_sci_info {
	struct udevice *dev;
	const struct ti_sci_desc *desc;
	struct ti_sci_handle handle;
	struct mbox_chan chan_tx;
	struct mbox_chan chan_rx;
	struct mbox_chan chan_notify;
	struct ti_sci_xfer xfer;
	struct list_head list;
	bool is_secure;
	u8 host_id;
	u8 seq;
};
94
95#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
96
/**
 * ti_sci_setup_one_xfer() - Setup one message type
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @buf:	Buffer to be send to mailbox channel
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Note: this driver keeps a single struct ti_sci_xfer per instance, so only
 * one message can be in flight at a time; @buf must remain valid until the
 * response has been consumed.
 *
 * Return: Corresponding ti_sci_xfer pointer if all went fine,
 *	   else appropriate error pointer.
 */
static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
						 u16 msg_type, u32 msg_flags,
						 u32 *buf,
						 size_t tx_message_size,
						 size_t rx_message_size)
{
	struct ti_sci_xfer *xfer = &info->xfer;
	struct ti_sci_msg_hdr *hdr;

	/* Ensure we have sane transfer sizes: both directions must fit the
	 * SoC limit and at least carry the standard message header.
	 */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    rx_message_size < sizeof(*hdr) || tx_message_size < sizeof(*hdr))
		return ERR_PTR(-ERANGE);

	/* Flip the sequence id; ti_sci_get_response() matches the reply
	 * against this value.
	 */
	info->seq = ~info->seq;
	xfer->tx_message.buf = buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	/* The caller's buffer starts with the standard TISCI header */
	hdr = (struct ti_sci_msg_hdr *)buf;
	hdr->seq = info->seq;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}
140
/**
 * ti_sci_get_response() - Receive response from mailbox channel
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 * @chan:	Channel to receive the response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static inline int ti_sci_get_response(struct ti_sci_info *info,
				      struct ti_sci_xfer *xfer,
				      struct mbox_chan *chan)
{
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	struct ti_sci_secure_msg_hdr *secure_hdr;
	struct ti_sci_msg_hdr *hdr;
	int ret;

	/* Receive the response; timeout is converted from ms to us */
	ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
	if (ret) {
		dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* ToDo: Verify checksum */
	if (info->is_secure) {
		/* Skip the secure header prepended by the firmware */
		secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
		msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
	}

	/* msg is updated by mailbox driver */
	hdr = (struct ti_sci_msg_hdr *)msg->buf;

	/* Sanity check for message response */
	if (hdr->seq != info->seq) {
		dev_dbg(info->dev, "%s: Message for %d is not expected\n",
			__func__, hdr->seq);
		/* NOTE(review): ret is 0 here, so a sequence mismatch is
		 * silently treated as success — presumably intentional
		 * best-effort behavior; confirm against protocol intent.
		 */
		return ret;
	}

	if (msg->len > info->desc->max_msg_size) {
		dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
			__func__, msg->len, info->desc->max_msg_size);
		return -EINVAL;
	}

	/* A short response is logged but not treated as fatal */
	if (msg->len < xfer->rx_len) {
		dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
			__func__, msg->len, xfer->rx_len);
	}

	return ret;
}
197
198/**
199 * ti_sci_do_xfer() - Do one transfer
200 * @info: Pointer to SCI entity information
201 * @xfer: Transfer to initiate and wait for response
202 *
203 * Return: 0 if all went fine, else return appropriate error.
204 */
205static inline int ti_sci_do_xfer(struct ti_sci_info *info,
206 struct ti_sci_xfer *xfer)
207{
208 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
209 u8 secure_buf[info->desc->max_msg_size];
210 struct ti_sci_secure_msg_hdr secure_hdr;
211 int ret;
212
213 if (info->is_secure) {
214 /* ToDo: get checksum of the entire message */
215 secure_hdr.checksum = 0;
216 secure_hdr.reserved = 0;
217 memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
218 xfer->tx_message.len);
219
220 xfer->tx_message.buf = (u32 *)secure_buf;
221 xfer->tx_message.len += sizeof(secure_hdr);
222 xfer->rx_len += sizeof(secure_hdr);
223 }
224
225 /* Send the message */
226 ret = mbox_send(&info->chan_tx, msg);
227 if (ret) {
228 dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
229 __func__, ret);
230 return ret;
231 }
232
233 return ti_sci_get_response(info, xfer, &info->chan_rx);
234}
235
236/**
237 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
238 * @handle: pointer to TI SCI handle
239 *
240 * Updates the SCI information in the internal data structure.
241 *
242 * Return: 0 if all went fine, else return appropriate error.
243 */
244static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
245{
246 struct ti_sci_msg_resp_version *rev_info;
247 struct ti_sci_version_info *ver;
248 struct ti_sci_msg_hdr hdr;
249 struct ti_sci_info *info;
250 struct ti_sci_xfer *xfer;
251 int ret;
252
253 if (IS_ERR(handle))
254 return PTR_ERR(handle);
255 if (!handle)
256 return -EINVAL;
257
258 info = handle_to_ti_sci_info(handle);
259
Andrew F. Davis8928fbd2019-04-29 09:04:11 -0400260 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
261 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530262 (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
263 sizeof(*rev_info));
264 if (IS_ERR(xfer)) {
265 ret = PTR_ERR(xfer);
266 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
267 return ret;
268 }
269
270 ret = ti_sci_do_xfer(info, xfer);
271 if (ret) {
272 dev_err(info->dev, "Mbox communication fail %d\n", ret);
273 return ret;
274 }
275
276 rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
277
278 ver = &handle->version;
279 ver->abi_major = rev_info->abi_major;
280 ver->abi_minor = rev_info->abi_minor;
281 ver->firmware_revision = rev_info->firmware_revision;
282 strncpy(ver->firmware_description, rev_info->firmware_description,
283 sizeof(ver->firmware_description));
284
285 return 0;
286}
287
288/**
289 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
290 * @r: pointer to response buffer
291 *
292 * Return: true if the response was an ACK, else returns false.
293 */
294static inline bool ti_sci_is_response_ack(void *r)
295{
296 struct ti_sci_msg_hdr *hdr = r;
297
298 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
299}
300
/**
 * cmd_set_board_config_using_msg() - Common command to send board configuration
 *                                    message
 * @handle:	pointer to TI SCI handle
 * @msg_type:	One of the TISCI message types to set board configuration
 * @addr:	Address where the board config structure is located
 * @size:	Size of the board config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
					  u16 msg_type, u64 addr, u32 size)
{
	struct ti_sci_msg_board_config req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, msg_type,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	/* Physical address of the config blob, split into 32-bit halves */
	req.boardcfgp_high = (addr >> 32) & 0xffffffff;
	req.boardcfgp_low = addr & 0xffffffff;
	req.boardcfg_size = size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	/* Response is written back into the shared xfer buffer */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}
352
353/**
354 * ti_sci_cmd_set_board_config() - Command to send board configuration message
355 * @handle: pointer to TI SCI handle
356 * @addr: Address where the board config structure is located
357 * @size: Size of the board config structure
358 *
359 * Return: 0 if all went well, else returns appropriate error value.
360 */
361static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
362 u64 addr, u32 size)
363{
364 return cmd_set_board_config_using_msg(handle,
365 TI_SCI_MSG_BOARD_CONFIG,
366 addr, size);
367}
368
369/**
370 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
371 * management configuration
372 * @handle: pointer to TI SCI handle
373 * @addr: Address where the board RM config structure is located
374 * @size: Size of the RM config structure
375 *
376 * Return: 0 if all went well, else returns appropriate error value.
377 */
378static
379int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
380 u64 addr, u32 size)
381{
382 return cmd_set_board_config_using_msg(handle,
383 TI_SCI_MSG_BOARD_CONFIG_RM,
384 addr, size);
385}
386
387/**
388 * ti_sci_cmd_set_board_config_security() - Command to send board security
389 * configuration message
390 * @handle: pointer to TI SCI handle
391 * @addr: Address where the board security config structure is located
392 * @size: Size of the security config structure
393 *
394 * Return: 0 if all went well, else returns appropriate error value.
395 */
396static
397int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
398 u64 addr, u32 size)
399{
400 return cmd_set_board_config_using_msg(handle,
401 TI_SCI_MSG_BOARD_CONFIG_SECURITY,
402 addr, size);
403}
404
405/**
406 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
407 * configuration message
408 * @handle: pointer to TI SCI handle
409 * @addr: Address where the board PM config structure is located
410 * @size: Size of the PM config structure
411 *
412 * Return: 0 if all went well, else returns appropriate error value.
413 */
414static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
415 u64 addr, u32 size)
416{
417 return cmd_set_board_config_using_msg(handle,
418 TI_SCI_MSG_BOARD_CONFIG_PM,
419 addr, size);
420}
421
/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	/* Caller-supplied flags are combined with the mandatory ACK flag */
	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.id = id;
	req.state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	/* Response is written back into the shared xfer buffer */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}
471
472/**
473 * ti_sci_get_device_state() - Get device state helper
474 * @handle: Handle to the device
475 * @id: Device Identifier
476 * @clcnt: Pointer to Context Loss Count
477 * @resets: pointer to resets
478 * @p_state: pointer to p_state
479 * @c_state: pointer to c_state
480 *
481 * Return: 0 if all went fine, else return appropriate error.
482 */
483static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
484 u32 id, u32 *clcnt, u32 *resets,
485 u8 *p_state, u8 *c_state)
486{
487 struct ti_sci_msg_resp_get_device_state *resp;
488 struct ti_sci_msg_req_get_device_state req;
489 struct ti_sci_info *info;
490 struct ti_sci_xfer *xfer;
491 int ret = 0;
492
493 if (IS_ERR(handle))
494 return PTR_ERR(handle);
495 if (!handle)
496 return -EINVAL;
497
498 if (!clcnt && !resets && !p_state && !c_state)
499 return -EINVAL;
500
501 info = handle_to_ti_sci_info(handle);
502
Andrew F. Davis8928fbd2019-04-29 09:04:11 -0400503 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
504 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +0530505 (u32 *)&req, sizeof(req), sizeof(*resp));
506 if (IS_ERR(xfer)) {
507 ret = PTR_ERR(xfer);
508 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
509 return ret;
510 }
511 req.id = id;
512
513 ret = ti_sci_do_xfer(info, xfer);
514 if (ret) {
515 dev_err(dev, "Mbox send fail %d\n", ret);
516 return ret;
517 }
518
519 resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;
520 if (!ti_sci_is_response_ack(resp))
521 return -ENODEV;
522
523 if (clcnt)
524 *clcnt = resp->context_loss_count;
525 if (resets)
526 *resets = resp->resets;
527 if (p_state)
528 *p_state = resp->programmed_state;
529 if (c_state)
530 *c_state = resp->current_state;
531
532 return ret;
533}
534
535/**
536 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
537 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
538 * @id: Device Identifier
539 *
540 * Request for the device - NOTE: the client MUST maintain integrity of
541 * usage count by balancing get_device with put_device. No refcounting is
542 * managed by driver for that purpose.
543 *
544 * NOTE: The request is for exclusive access for the processor.
545 *
546 * Return: 0 if all went fine, else return appropriate error.
547 */
548static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
549{
550 return ti_sci_set_device_state(handle, id,
551 MSG_FLAG_DEVICE_EXCLUSIVE,
552 MSG_DEVICE_SW_STATE_ON);
553}
554
555/**
556 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
557 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
558 * @id: Device Identifier
559 *
560 * Request for the device - NOTE: the client MUST maintain integrity of
561 * usage count by balancing get_device with put_device. No refcounting is
562 * managed by driver for that purpose.
563 *
564 * Return: 0 if all went fine, else return appropriate error.
565 */
566static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
567{
568 return ti_sci_set_device_state(handle, id,
569 MSG_FLAG_DEVICE_EXCLUSIVE,
570 MSG_DEVICE_SW_STATE_RETENTION);
571}
572
573/**
574 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
575 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
576 * @id: Device Identifier
577 *
578 * Request for the device - NOTE: the client MUST maintain integrity of
579 * usage count by balancing get_device with put_device. No refcounting is
580 * managed by driver for that purpose.
581 *
582 * Return: 0 if all went fine, else return appropriate error.
583 */
584static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
585{
586 return ti_sci_set_device_state(handle, id,
587 0, MSG_DEVICE_SW_STATE_AUTO_OFF);
588}
589
590/**
591 * ti_sci_cmd_dev_is_valid() - Is the device valid
592 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
593 * @id: Device Identifier
594 *
595 * Return: 0 if all went fine and the device ID is valid, else return
596 * appropriate error.
597 */
598static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
599{
600 u8 unused;
601
602 /* check the device state which will also tell us if the ID is valid */
603 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
604}
605
606/**
607 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
608 * @handle: Pointer to TISCI handle
609 * @id: Device Identifier
610 * @count: Pointer to Context Loss counter to populate
611 *
612 * Return: 0 if all went fine, else return appropriate error.
613 */
614static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
615 u32 *count)
616{
617 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
618}
619
620/**
621 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
622 * @handle: Pointer to TISCI handle
623 * @id: Device Identifier
624 * @r_state: true if requested to be idle
625 *
626 * Return: 0 if all went fine, else return appropriate error.
627 */
628static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
629 bool *r_state)
630{
631 int ret;
632 u8 state;
633
634 if (!r_state)
635 return -EINVAL;
636
637 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
638 if (ret)
639 return ret;
640
641 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
642
643 return 0;
644}
645
646/**
647 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
648 * @handle: Pointer to TISCI handle
649 * @id: Device Identifier
650 * @r_state: true if requested to be stopped
651 * @curr_state: true if currently stopped.
652 *
653 * Return: 0 if all went fine, else return appropriate error.
654 */
655static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
656 bool *r_state, bool *curr_state)
657{
658 int ret;
659 u8 p_state, c_state;
660
661 if (!r_state && !curr_state)
662 return -EINVAL;
663
664 ret =
665 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
666 if (ret)
667 return ret;
668
669 if (r_state)
670 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
671 if (curr_state)
672 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
673
674 return 0;
675}
676
677/**
678 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
679 * @handle: Pointer to TISCI handle
680 * @id: Device Identifier
681 * @r_state: true if requested to be ON
682 * @curr_state: true if currently ON and active
683 *
684 * Return: 0 if all went fine, else return appropriate error.
685 */
686static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
687 bool *r_state, bool *curr_state)
688{
689 int ret;
690 u8 p_state, c_state;
691
692 if (!r_state && !curr_state)
693 return -EINVAL;
694
695 ret =
696 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
697 if (ret)
698 return ret;
699
700 if (r_state)
701 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
702 if (curr_state)
703 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
704
705 return 0;
706}
707
708/**
709 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
710 * @handle: Pointer to TISCI handle
711 * @id: Device Identifier
712 * @curr_state: true if currently transitioning.
713 *
714 * Return: 0 if all went fine, else return appropriate error.
715 */
716static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
717 bool *curr_state)
718{
719 int ret;
720 u8 state;
721
722 if (!curr_state)
723 return -EINVAL;
724
725 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
726 if (ret)
727 return ret;
728
729 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
730
731 return 0;
732}
733
/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_msg_req_set_device_resets req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.id = id;
	req.resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	/* Response is written back into the shared xfer buffer */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}
783
784/**
785 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
786 * by TISCI
787 * @handle: Pointer to TISCI handle
788 * @id: Device Identifier
789 * @reset_state: Pointer to reset state to populate
790 *
791 * Return: 0 if all went fine, else return appropriate error.
792 */
793static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
794 u32 id, u32 *reset_state)
795{
796 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
797 NULL);
798}
799
/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has it's own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_clock_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	/* Caller-supplied flags are combined with the mandatory ACK flag */
	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	/* Response is written back into the shared xfer buffer */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	return ret;
}
854
/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has it's own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state: State requested for clock to move to
 * @current_state: State that the clock is currently in
 *
 * Either output pointer may be NULL, but not both.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u8 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_msg_req_get_clock_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message alloc failed(%d)\n", ret);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send fail %d\n", ret);
		return ret;
	}

	/* Response is written back into the shared xfer buffer */
	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;

	if (!ti_sci_is_response_ack(resp))
		return -ENODEV;

	/* Copy out only the items the caller asked for */
	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

	return ret;
}
916
917/**
918 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
919 * @handle: pointer to TI SCI handle
920 * @dev_id: Device identifier this request is for
921 * @clk_id: Clock identifier for the device for this request.
922 * Each device has it's own set of clock inputs. This indexes
923 * which clock input to modify.
924 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
925 * @can_change_freq: 'true' if frequency change is desired, else 'false'
926 * @enable_input_term: 'true' if input termination is desired, else 'false'
927 *
928 * Return: 0 if all went well, else returns appropriate error value.
929 */
930static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
931 u8 clk_id, bool needs_ssc, bool can_change_freq,
932 bool enable_input_term)
933{
934 u32 flags = 0;
935
936 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
937 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
938 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
939
940 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
941 MSG_CLOCK_SW_STATE_REQ);
942}
943
944/**
945 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
946 * @handle: pointer to TI SCI handle
947 * @dev_id: Device identifier this request is for
948 * @clk_id: Clock identifier for the device for this request.
949 * Each device has it's own set of clock inputs. This indexes
950 * which clock input to modify.
951 *
952 * NOTE: This clock must have been requested by get_clock previously.
953 *
954 * Return: 0 if all went well, else returns appropriate error value.
955 */
956static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
957 u32 dev_id, u8 clk_id)
958{
959 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
960 MSG_CLOCK_SW_STATE_UNREQ);
961}
962
963/**
964 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
965 * @handle: pointer to TI SCI handle
966 * @dev_id: Device identifier this request is for
967 * @clk_id: Clock identifier for the device for this request.
968 * Each device has it's own set of clock inputs. This indexes
969 * which clock input to modify.
970 *
971 * NOTE: This clock must have been requested by get_clock previously.
972 *
973 * Return: 0 if all went well, else returns appropriate error value.
974 */
975static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
976 u32 dev_id, u8 clk_id)
977{
978 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
979 MSG_CLOCK_SW_STATE_AUTO);
980}
981
982/**
983 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
984 * @handle: pointer to TI SCI handle
985 * @dev_id: Device identifier this request is for
986 * @clk_id: Clock identifier for the device for this request.
987 * Each device has it's own set of clock inputs. This indexes
988 * which clock input to modify.
989 * @req_state: state indicating if the clock is auto managed
990 *
991 * Return: 0 if all went well, else returns appropriate error value.
992 */
993static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
994 u32 dev_id, u8 clk_id, bool *req_state)
995{
996 u8 state = 0;
997 int ret;
998
999 if (!req_state)
1000 return -EINVAL;
1001
1002 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1003 if (ret)
1004 return ret;
1005
1006 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1007 return 0;
1008}
1009
1010/**
1011 * ti_sci_cmd_clk_is_on() - Is the clock ON
1012 * @handle: pointer to TI SCI handle
1013 * @dev_id: Device identifier this request is for
1014 * @clk_id: Clock identifier for the device for this request.
1015 * Each device has it's own set of clock inputs. This indexes
1016 * which clock input to modify.
1017 * @req_state: state indicating if the clock is managed by us and enabled
1018 * @curr_state: state indicating if the clock is ready for operation
1019 *
1020 * Return: 0 if all went well, else returns appropriate error value.
1021 */
1022static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1023 u8 clk_id, bool *req_state, bool *curr_state)
1024{
1025 u8 c_state = 0, r_state = 0;
1026 int ret;
1027
1028 if (!req_state && !curr_state)
1029 return -EINVAL;
1030
1031 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1032 &r_state, &c_state);
1033 if (ret)
1034 return ret;
1035
1036 if (req_state)
1037 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1038 if (curr_state)
1039 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1040 return 0;
1041}
1042
1043/**
1044 * ti_sci_cmd_clk_is_off() - Is the clock OFF
1045 * @handle: pointer to TI SCI handle
1046 * @dev_id: Device identifier this request is for
1047 * @clk_id: Clock identifier for the device for this request.
1048 * Each device has it's own set of clock inputs. This indexes
1049 * which clock input to modify.
1050 * @req_state: state indicating if the clock is managed by us and disabled
1051 * @curr_state: state indicating if the clock is NOT ready for operation
1052 *
1053 * Return: 0 if all went well, else returns appropriate error value.
1054 */
1055static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1056 u8 clk_id, bool *req_state, bool *curr_state)
1057{
1058 u8 c_state = 0, r_state = 0;
1059 int ret;
1060
1061 if (!req_state && !curr_state)
1062 return -EINVAL;
1063
1064 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1065 &r_state, &c_state);
1066 if (ret)
1067 return ret;
1068
1069 if (req_state)
1070 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1071 if (curr_state)
1072 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1073 return 0;
1074}
1075
1076/**
1077 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1078 * @handle: pointer to TI SCI handle
1079 * @dev_id: Device identifier this request is for
1080 * @clk_id: Clock identifier for the device for this request.
1081 * Each device has it's own set of clock inputs. This indexes
1082 * which clock input to modify.
1083 * @parent_id: Parent clock identifier to set
1084 *
1085 * Return: 0 if all went well, else returns appropriate error value.
1086 */
1087static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1088 u32 dev_id, u8 clk_id, u8 parent_id)
1089{
1090 struct ti_sci_msg_req_set_clock_parent req;
1091 struct ti_sci_msg_hdr *resp;
1092 struct ti_sci_info *info;
1093 struct ti_sci_xfer *xfer;
1094 int ret = 0;
1095
1096 if (IS_ERR(handle))
1097 return PTR_ERR(handle);
1098 if (!handle)
1099 return -EINVAL;
1100
1101 info = handle_to_ti_sci_info(handle);
1102
1103 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1104 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1105 (u32 *)&req, sizeof(req), sizeof(*resp));
1106 if (IS_ERR(xfer)) {
1107 ret = PTR_ERR(xfer);
1108 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1109 return ret;
1110 }
1111 req.dev_id = dev_id;
1112 req.clk_id = clk_id;
1113 req.parent_id = parent_id;
1114
1115 ret = ti_sci_do_xfer(info, xfer);
1116 if (ret) {
1117 dev_err(info->dev, "Mbox send fail %d\n", ret);
1118 return ret;
1119 }
1120
1121 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1122
1123 if (!ti_sci_is_response_ack(resp))
1124 return -ENODEV;
1125
1126 return ret;
1127}
1128
1129/**
1130 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1131 * @handle: pointer to TI SCI handle
1132 * @dev_id: Device identifier this request is for
1133 * @clk_id: Clock identifier for the device for this request.
1134 * Each device has it's own set of clock inputs. This indexes
1135 * which clock input to modify.
1136 * @parent_id: Current clock parent
1137 *
1138 * Return: 0 if all went well, else returns appropriate error value.
1139 */
1140static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1141 u32 dev_id, u8 clk_id, u8 *parent_id)
1142{
1143 struct ti_sci_msg_resp_get_clock_parent *resp;
1144 struct ti_sci_msg_req_get_clock_parent req;
1145 struct ti_sci_info *info;
1146 struct ti_sci_xfer *xfer;
1147 int ret = 0;
1148
1149 if (IS_ERR(handle))
1150 return PTR_ERR(handle);
1151 if (!handle || !parent_id)
1152 return -EINVAL;
1153
1154 info = handle_to_ti_sci_info(handle);
1155
1156 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1157 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1158 (u32 *)&req, sizeof(req), sizeof(*resp));
1159 if (IS_ERR(xfer)) {
1160 ret = PTR_ERR(xfer);
1161 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1162 return ret;
1163 }
1164 req.dev_id = dev_id;
1165 req.clk_id = clk_id;
1166
1167 ret = ti_sci_do_xfer(info, xfer);
1168 if (ret) {
1169 dev_err(info->dev, "Mbox send fail %d\n", ret);
1170 return ret;
1171 }
1172
1173 resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;
1174
1175 if (!ti_sci_is_response_ack(resp))
1176 ret = -ENODEV;
1177 else
1178 *parent_id = resp->parent_id;
1179
1180 return ret;
1181}
1182
1183/**
1184 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1185 * @handle: pointer to TI SCI handle
1186 * @dev_id: Device identifier this request is for
1187 * @clk_id: Clock identifier for the device for this request.
1188 * Each device has it's own set of clock inputs. This indexes
1189 * which clock input to modify.
1190 * @num_parents: Returns he number of parents to the current clock.
1191 *
1192 * Return: 0 if all went well, else returns appropriate error value.
1193 */
1194static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1195 u32 dev_id, u8 clk_id,
1196 u8 *num_parents)
1197{
1198 struct ti_sci_msg_resp_get_clock_num_parents *resp;
1199 struct ti_sci_msg_req_get_clock_num_parents req;
1200 struct ti_sci_info *info;
1201 struct ti_sci_xfer *xfer;
1202 int ret = 0;
1203
1204 if (IS_ERR(handle))
1205 return PTR_ERR(handle);
1206 if (!handle || !num_parents)
1207 return -EINVAL;
1208
1209 info = handle_to_ti_sci_info(handle);
1210
1211 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1212 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1213 (u32 *)&req, sizeof(req), sizeof(*resp));
1214 if (IS_ERR(xfer)) {
1215 ret = PTR_ERR(xfer);
1216 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1217 return ret;
1218 }
1219 req.dev_id = dev_id;
1220 req.clk_id = clk_id;
1221
1222 ret = ti_sci_do_xfer(info, xfer);
1223 if (ret) {
1224 dev_err(info->dev, "Mbox send fail %d\n", ret);
1225 return ret;
1226 }
1227
1228 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
1229 xfer->tx_message.buf;
1230
1231 if (!ti_sci_is_response_ack(resp))
1232 ret = -ENODEV;
1233 else
1234 *num_parents = resp->num_parents;
1235
1236 return ret;
1237}
1238
1239/**
1240 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1241 * @handle: pointer to TI SCI handle
1242 * @dev_id: Device identifier this request is for
1243 * @clk_id: Clock identifier for the device for this request.
1244 * Each device has it's own set of clock inputs. This indexes
1245 * which clock input to modify.
1246 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1247 * allowable programmed frequency and does not account for clock
1248 * tolerances and jitter.
1249 * @target_freq: The target clock frequency in Hz. A frequency will be
1250 * processed as close to this target frequency as possible.
1251 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1252 * allowable programmed frequency and does not account for clock
1253 * tolerances and jitter.
1254 * @match_freq: Frequency match in Hz response.
1255 *
1256 * Return: 0 if all went well, else returns appropriate error value.
1257 */
1258static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1259 u32 dev_id, u8 clk_id, u64 min_freq,
1260 u64 target_freq, u64 max_freq,
1261 u64 *match_freq)
1262{
1263 struct ti_sci_msg_resp_query_clock_freq *resp;
1264 struct ti_sci_msg_req_query_clock_freq req;
1265 struct ti_sci_info *info;
1266 struct ti_sci_xfer *xfer;
1267 int ret = 0;
1268
1269 if (IS_ERR(handle))
1270 return PTR_ERR(handle);
1271 if (!handle || !match_freq)
1272 return -EINVAL;
1273
1274 info = handle_to_ti_sci_info(handle);
1275
1276 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1277 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1278 (u32 *)&req, sizeof(req), sizeof(*resp));
1279 if (IS_ERR(xfer)) {
1280 ret = PTR_ERR(xfer);
1281 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1282 return ret;
1283 }
1284 req.dev_id = dev_id;
1285 req.clk_id = clk_id;
1286 req.min_freq_hz = min_freq;
1287 req.target_freq_hz = target_freq;
1288 req.max_freq_hz = max_freq;
1289
1290 ret = ti_sci_do_xfer(info, xfer);
1291 if (ret) {
1292 dev_err(info->dev, "Mbox send fail %d\n", ret);
1293 return ret;
1294 }
1295
1296 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
1297
1298 if (!ti_sci_is_response_ack(resp))
1299 ret = -ENODEV;
1300 else
1301 *match_freq = resp->freq_hz;
1302
1303 return ret;
1304}
1305
1306/**
1307 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1308 * @handle: pointer to TI SCI handle
1309 * @dev_id: Device identifier this request is for
1310 * @clk_id: Clock identifier for the device for this request.
1311 * Each device has it's own set of clock inputs. This indexes
1312 * which clock input to modify.
1313 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1314 * allowable programmed frequency and does not account for clock
1315 * tolerances and jitter.
1316 * @target_freq: The target clock frequency in Hz. A frequency will be
1317 * processed as close to this target frequency as possible.
1318 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1319 * allowable programmed frequency and does not account for clock
1320 * tolerances and jitter.
1321 *
1322 * Return: 0 if all went well, else returns appropriate error value.
1323 */
1324static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1325 u32 dev_id, u8 clk_id, u64 min_freq,
1326 u64 target_freq, u64 max_freq)
1327{
1328 struct ti_sci_msg_req_set_clock_freq req;
1329 struct ti_sci_msg_hdr *resp;
1330 struct ti_sci_info *info;
1331 struct ti_sci_xfer *xfer;
1332 int ret = 0;
1333
1334 if (IS_ERR(handle))
1335 return PTR_ERR(handle);
1336 if (!handle)
1337 return -EINVAL;
1338
1339 info = handle_to_ti_sci_info(handle);
1340
1341 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1342 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1343 (u32 *)&req, sizeof(req), sizeof(*resp));
1344 if (IS_ERR(xfer)) {
1345 ret = PTR_ERR(xfer);
1346 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1347 return ret;
1348 }
1349 req.dev_id = dev_id;
1350 req.clk_id = clk_id;
1351 req.min_freq_hz = min_freq;
1352 req.target_freq_hz = target_freq;
1353 req.max_freq_hz = max_freq;
1354
1355 ret = ti_sci_do_xfer(info, xfer);
1356 if (ret) {
1357 dev_err(info->dev, "Mbox send fail %d\n", ret);
1358 return ret;
1359 }
1360
1361 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1362
1363 if (!ti_sci_is_response_ack(resp))
1364 return -ENODEV;
1365
1366 return ret;
1367}
1368
1369/**
1370 * ti_sci_cmd_clk_get_freq() - Get current frequency
1371 * @handle: pointer to TI SCI handle
1372 * @dev_id: Device identifier this request is for
1373 * @clk_id: Clock identifier for the device for this request.
1374 * Each device has it's own set of clock inputs. This indexes
1375 * which clock input to modify.
1376 * @freq: Currently frequency in Hz
1377 *
1378 * Return: 0 if all went well, else returns appropriate error value.
1379 */
1380static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1381 u32 dev_id, u8 clk_id, u64 *freq)
1382{
1383 struct ti_sci_msg_resp_get_clock_freq *resp;
1384 struct ti_sci_msg_req_get_clock_freq req;
1385 struct ti_sci_info *info;
1386 struct ti_sci_xfer *xfer;
1387 int ret = 0;
1388
1389 if (IS_ERR(handle))
1390 return PTR_ERR(handle);
1391 if (!handle || !freq)
1392 return -EINVAL;
1393
1394 info = handle_to_ti_sci_info(handle);
1395
1396 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1397 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1398 (u32 *)&req, sizeof(req), sizeof(*resp));
1399 if (IS_ERR(xfer)) {
1400 ret = PTR_ERR(xfer);
1401 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1402 return ret;
1403 }
1404 req.dev_id = dev_id;
1405 req.clk_id = clk_id;
1406
1407 ret = ti_sci_do_xfer(info, xfer);
1408 if (ret) {
1409 dev_err(info->dev, "Mbox send fail %d\n", ret);
1410 return ret;
1411 }
1412
1413 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
1414
1415 if (!ti_sci_is_response_ack(resp))
1416 ret = -ENODEV;
1417 else
1418 *freq = resp->freq_hz;
1419
1420 return ret;
1421}
1422
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05301423/**
1424 * ti_sci_cmd_core_reboot() - Command to request system reset
1425 * @handle: pointer to TI SCI handle
1426 *
1427 * Return: 0 if all went well, else returns appropriate error value.
1428 */
1429static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1430{
1431 struct ti_sci_msg_req_reboot req;
1432 struct ti_sci_msg_hdr *resp;
1433 struct ti_sci_info *info;
1434 struct ti_sci_xfer *xfer;
1435 int ret = 0;
1436
1437 if (IS_ERR(handle))
1438 return PTR_ERR(handle);
1439 if (!handle)
1440 return -EINVAL;
1441
1442 info = handle_to_ti_sci_info(handle);
1443
1444 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1445 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1446 (u32 *)&req, sizeof(req), sizeof(*resp));
1447 if (IS_ERR(xfer)) {
1448 ret = PTR_ERR(xfer);
1449 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1450 return ret;
1451 }
1452
1453 ret = ti_sci_do_xfer(info, xfer);
1454 if (ret) {
1455 dev_err(dev, "Mbox send fail %d\n", ret);
1456 return ret;
1457 }
1458
1459 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1460
1461 if (!ti_sci_is_response_ack(resp))
1462 return -ENODEV;
1463
1464 return ret;
1465}
1466
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301467static int ti_sci_get_resource_type(struct ti_sci_info *info, u16 dev_id,
1468 u16 *type)
1469{
1470 struct ti_sci_rm_type_map *rm_type_map = info->desc->rm_type_map;
1471 bool found = false;
1472 int i;
1473
1474 /* If map is not provided then assume dev_id is used as type */
1475 if (!rm_type_map) {
1476 *type = dev_id;
1477 return 0;
1478 }
1479
1480 for (i = 0; rm_type_map[i].dev_id; i++) {
1481 if (rm_type_map[i].dev_id == dev_id) {
1482 *type = rm_type_map[i].type;
1483 found = true;
1484 break;
1485 }
1486 }
1487
1488 if (!found)
1489 return -EINVAL;
1490
1491 return 0;
1492}
1493
1494/**
1495 * ti_sci_get_resource_range - Helper to get a range of resources assigned
1496 * to a host. Resource is uniquely identified by
1497 * type and subtype.
1498 * @handle: Pointer to TISCI handle.
1499 * @dev_id: TISCI device ID.
1500 * @subtype: Resource assignment subtype that is being requested
1501 * from the given device.
1502 * @s_host: Host processor ID to which the resources are allocated
1503 * @range_start: Start index of the resource range
1504 * @range_num: Number of resources in the range
1505 *
1506 * Return: 0 if all went fine, else return appropriate error.
1507 */
1508static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1509 u32 dev_id, u8 subtype, u8 s_host,
1510 u16 *range_start, u16 *range_num)
1511{
1512 struct ti_sci_msg_resp_get_resource_range *resp;
1513 struct ti_sci_msg_req_get_resource_range req;
1514 struct ti_sci_xfer *xfer;
1515 struct ti_sci_info *info;
1516 u16 type;
1517 int ret = 0;
1518
1519 if (IS_ERR(handle))
1520 return PTR_ERR(handle);
1521 if (!handle)
1522 return -EINVAL;
1523
1524 info = handle_to_ti_sci_info(handle);
1525
1526 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1527 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1528 (u32 *)&req, sizeof(req), sizeof(*resp));
1529 if (IS_ERR(xfer)) {
1530 ret = PTR_ERR(xfer);
1531 dev_err(dev, "Message alloc failed(%d)\n", ret);
1532 return ret;
1533 }
1534
1535 ret = ti_sci_get_resource_type(info, dev_id, &type);
1536 if (ret) {
1537 dev_err(dev, "rm type lookup failed for %u\n", dev_id);
1538 goto fail;
1539 }
1540
1541 req.secondary_host = s_host;
1542 req.type = type & MSG_RM_RESOURCE_TYPE_MASK;
1543 req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1544
1545 ret = ti_sci_do_xfer(info, xfer);
1546 if (ret) {
1547 dev_err(dev, "Mbox send fail %d\n", ret);
1548 goto fail;
1549 }
1550
1551 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
1552 if (!ti_sci_is_response_ack(resp)) {
1553 ret = -ENODEV;
1554 } else if (!resp->range_start && !resp->range_num) {
1555 ret = -ENODEV;
1556 } else {
1557 *range_start = resp->range_start;
1558 *range_num = resp->range_num;
1559 };
1560
1561fail:
1562 return ret;
1563}
1564
1565/**
1566 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1567 * that is same as ti sci interface host.
1568 * @handle: Pointer to TISCI handle.
1569 * @dev_id: TISCI device ID.
1570 * @subtype: Resource assignment subtype that is being requested
1571 * from the given device.
1572 * @range_start: Start index of the resource range
1573 * @range_num: Number of resources in the range
1574 *
1575 * Return: 0 if all went fine, else return appropriate error.
1576 */
1577static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1578 u32 dev_id, u8 subtype,
1579 u16 *range_start, u16 *range_num)
1580{
1581 return ti_sci_get_resource_range(handle, dev_id, subtype,
1582 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1583 range_start, range_num);
1584}
1585
1586/**
1587 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1588 * assigned to a specified host.
1589 * @handle: Pointer to TISCI handle.
1590 * @dev_id: TISCI device ID.
1591 * @subtype: Resource assignment subtype that is being requested
1592 * from the given device.
1593 * @s_host: Host processor ID to which the resources are allocated
1594 * @range_start: Start index of the resource range
1595 * @range_num: Number of resources in the range
1596 *
1597 * Return: 0 if all went fine, else return appropriate error.
1598 */
1599static
1600int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1601 u32 dev_id, u8 subtype, u8 s_host,
1602 u16 *range_start, u16 *range_num)
1603{
1604 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1605 range_start, range_num);
1606}
1607
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301608/**
Lokesh Vutla032dce82019-03-08 11:47:32 +05301609 * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
1610 * @handle: pointer to TI SCI handle
1611 * @msms_start: MSMC start as returned by tisci
1612 * @msmc_end: MSMC end as returned by tisci
1613 *
1614 * Return: 0 if all went well, else returns appropriate error value.
1615 */
1616static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1617 u64 *msmc_start, u64 *msmc_end)
1618{
1619 struct ti_sci_msg_resp_query_msmc *resp;
1620 struct ti_sci_msg_hdr req;
1621 struct ti_sci_info *info;
1622 struct ti_sci_xfer *xfer;
1623 int ret = 0;
1624
1625 if (IS_ERR(handle))
1626 return PTR_ERR(handle);
1627 if (!handle)
1628 return -EINVAL;
1629
1630 info = handle_to_ti_sci_info(handle);
1631
1632 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1633 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1634 (u32 *)&req, sizeof(req), sizeof(*resp));
1635 if (IS_ERR(xfer)) {
1636 ret = PTR_ERR(xfer);
1637 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1638 return ret;
1639 }
1640
1641 ret = ti_sci_do_xfer(info, xfer);
1642 if (ret) {
1643 dev_err(dev, "Mbox send fail %d\n", ret);
1644 return ret;
1645 }
1646
1647 resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1648
1649 if (!ti_sci_is_response_ack(resp))
1650 return -ENODEV;
1651
1652 *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1653 resp->msmc_start_low;
1654 *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1655 resp->msmc_end_low;
1656
1657 return ret;
1658}
1659
1660/**
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301661 * ti_sci_cmd_proc_request() - Command to request a physical processor control
1662 * @handle: Pointer to TI SCI handle
1663 * @proc_id: Processor ID this request is for
1664 *
1665 * Return: 0 if all went well, else returns appropriate error value.
1666 */
1667static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1668 u8 proc_id)
1669{
1670 struct ti_sci_msg_req_proc_request req;
1671 struct ti_sci_msg_hdr *resp;
1672 struct ti_sci_info *info;
1673 struct ti_sci_xfer *xfer;
1674 int ret = 0;
1675
1676 if (IS_ERR(handle))
1677 return PTR_ERR(handle);
1678 if (!handle)
1679 return -EINVAL;
1680
1681 info = handle_to_ti_sci_info(handle);
1682
1683 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1684 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1685 (u32 *)&req, sizeof(req), sizeof(*resp));
1686 if (IS_ERR(xfer)) {
1687 ret = PTR_ERR(xfer);
1688 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1689 return ret;
1690 }
1691 req.processor_id = proc_id;
1692
1693 ret = ti_sci_do_xfer(info, xfer);
1694 if (ret) {
1695 dev_err(info->dev, "Mbox send fail %d\n", ret);
1696 return ret;
1697 }
1698
1699 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1700
1701 if (!ti_sci_is_response_ack(resp))
1702 ret = -ENODEV;
1703
1704 return ret;
1705}
1706
1707/**
1708 * ti_sci_cmd_proc_release() - Command to release a physical processor control
1709 * @handle: Pointer to TI SCI handle
1710 * @proc_id: Processor ID this request is for
1711 *
1712 * Return: 0 if all went well, else returns appropriate error value.
1713 */
1714static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1715 u8 proc_id)
1716{
1717 struct ti_sci_msg_req_proc_release req;
1718 struct ti_sci_msg_hdr *resp;
1719 struct ti_sci_info *info;
1720 struct ti_sci_xfer *xfer;
1721 int ret = 0;
1722
1723 if (IS_ERR(handle))
1724 return PTR_ERR(handle);
1725 if (!handle)
1726 return -EINVAL;
1727
1728 info = handle_to_ti_sci_info(handle);
1729
1730 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1731 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1732 (u32 *)&req, sizeof(req), sizeof(*resp));
1733 if (IS_ERR(xfer)) {
1734 ret = PTR_ERR(xfer);
1735 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1736 return ret;
1737 }
1738 req.processor_id = proc_id;
1739
1740 ret = ti_sci_do_xfer(info, xfer);
1741 if (ret) {
1742 dev_err(info->dev, "Mbox send fail %d\n", ret);
1743 return ret;
1744 }
1745
1746 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1747
1748 if (!ti_sci_is_response_ack(resp))
1749 ret = -ENODEV;
1750
1751 return ret;
1752}
1753
1754/**
1755 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
1756 * control to a host in the processor's access
1757 * control list.
1758 * @handle: Pointer to TI SCI handle
1759 * @proc_id: Processor ID this request is for
1760 * @host_id: Host ID to get the control of the processor
1761 *
1762 * Return: 0 if all went well, else returns appropriate error value.
1763 */
1764static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1765 u8 proc_id, u8 host_id)
1766{
1767 struct ti_sci_msg_req_proc_handover req;
1768 struct ti_sci_msg_hdr *resp;
1769 struct ti_sci_info *info;
1770 struct ti_sci_xfer *xfer;
1771 int ret = 0;
1772
1773 if (IS_ERR(handle))
1774 return PTR_ERR(handle);
1775 if (!handle)
1776 return -EINVAL;
1777
1778 info = handle_to_ti_sci_info(handle);
1779
1780 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1781 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1782 (u32 *)&req, sizeof(req), sizeof(*resp));
1783 if (IS_ERR(xfer)) {
1784 ret = PTR_ERR(xfer);
1785 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1786 return ret;
1787 }
1788 req.processor_id = proc_id;
1789 req.host_id = host_id;
1790
1791 ret = ti_sci_do_xfer(info, xfer);
1792 if (ret) {
1793 dev_err(info->dev, "Mbox send fail %d\n", ret);
1794 return ret;
1795 }
1796
1797 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1798
1799 if (!ti_sci_is_response_ack(resp))
1800 ret = -ENODEV;
1801
1802 return ret;
1803}
1804
1805/**
1806 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1807 * configuration flags
1808 * @handle: Pointer to TI SCI handle
1809 * @proc_id: Processor ID this request is for
1810 * @config_flags_set: Configuration flags to be set
1811 * @config_flags_clear: Configuration flags to be cleared.
1812 *
1813 * Return: 0 if all went well, else returns appropriate error value.
1814 */
1815static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1816 u8 proc_id, u64 bootvector,
1817 u32 config_flags_set,
1818 u32 config_flags_clear)
1819{
1820 struct ti_sci_msg_req_set_proc_boot_config req;
1821 struct ti_sci_msg_hdr *resp;
1822 struct ti_sci_info *info;
1823 struct ti_sci_xfer *xfer;
1824 int ret = 0;
1825
1826 if (IS_ERR(handle))
1827 return PTR_ERR(handle);
1828 if (!handle)
1829 return -EINVAL;
1830
1831 info = handle_to_ti_sci_info(handle);
1832
1833 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1834 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1835 (u32 *)&req, sizeof(req), sizeof(*resp));
1836 if (IS_ERR(xfer)) {
1837 ret = PTR_ERR(xfer);
1838 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1839 return ret;
1840 }
1841 req.processor_id = proc_id;
1842 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1843 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1844 TISCI_ADDR_HIGH_SHIFT;
1845 req.config_flags_set = config_flags_set;
1846 req.config_flags_clear = config_flags_clear;
1847
1848 ret = ti_sci_do_xfer(info, xfer);
1849 if (ret) {
1850 dev_err(info->dev, "Mbox send fail %d\n", ret);
1851 return ret;
1852 }
1853
1854 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1855
1856 if (!ti_sci_is_response_ack(resp))
1857 ret = -ENODEV;
1858
1859 return ret;
1860}
1861
1862/**
1863 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1864 * control flags
1865 * @handle: Pointer to TI SCI handle
1866 * @proc_id: Processor ID this request is for
1867 * @control_flags_set: Control flags to be set
1868 * @control_flags_clear: Control flags to be cleared
1869 *
1870 * Return: 0 if all went well, else returns appropriate error value.
1871 */
1872static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
1873 u8 proc_id, u32 control_flags_set,
1874 u32 control_flags_clear)
1875{
1876 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1877 struct ti_sci_msg_hdr *resp;
1878 struct ti_sci_info *info;
1879 struct ti_sci_xfer *xfer;
1880 int ret = 0;
1881
1882 if (IS_ERR(handle))
1883 return PTR_ERR(handle);
1884 if (!handle)
1885 return -EINVAL;
1886
1887 info = handle_to_ti_sci_info(handle);
1888
1889 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
1890 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1891 (u32 *)&req, sizeof(req), sizeof(*resp));
1892 if (IS_ERR(xfer)) {
1893 ret = PTR_ERR(xfer);
1894 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1895 return ret;
1896 }
1897 req.processor_id = proc_id;
1898 req.control_flags_set = control_flags_set;
1899 req.control_flags_clear = control_flags_clear;
1900
1901 ret = ti_sci_do_xfer(info, xfer);
1902 if (ret) {
1903 dev_err(info->dev, "Mbox send fail %d\n", ret);
1904 return ret;
1905 }
1906
1907 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
1908
1909 if (!ti_sci_is_response_ack(resp))
1910 ret = -ENODEV;
1911
1912 return ret;
1913}
1914
1915/**
1916 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
1917 * image and then set the processor configuration flags.
1918 * @handle: Pointer to TI SCI handle
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001919 * @image_addr: Memory address at which payload image and certificate is
1920 * located in memory, this is updated if the image data is
1921 * moved during authentication.
1922 * @image_size: This is updated with the final size of the image after
1923 * authentication.
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301924 *
1925 * Return: 0 if all went well, else returns appropriate error value.
1926 */
1927static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001928 u64 *image_addr, u32 *image_size)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301929{
1930 struct ti_sci_msg_req_proc_auth_boot_image req;
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001931 struct ti_sci_msg_resp_proc_auth_boot_image *resp;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301932 struct ti_sci_info *info;
1933 struct ti_sci_xfer *xfer;
1934 int ret = 0;
1935
1936 if (IS_ERR(handle))
1937 return PTR_ERR(handle);
1938 if (!handle)
1939 return -EINVAL;
1940
1941 info = handle_to_ti_sci_info(handle);
1942
1943 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMIAGE,
1944 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1945 (u32 *)&req, sizeof(req), sizeof(*resp));
1946 if (IS_ERR(xfer)) {
1947 ret = PTR_ERR(xfer);
1948 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
1949 return ret;
1950 }
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001951 req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
1952 req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301953 TISCI_ADDR_HIGH_SHIFT;
1954
1955 ret = ti_sci_do_xfer(info, xfer);
1956 if (ret) {
1957 dev_err(info->dev, "Mbox send fail %d\n", ret);
1958 return ret;
1959 }
1960
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001961 resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301962
1963 if (!ti_sci_is_response_ack(resp))
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001964 return -ENODEV;
1965
1966 *image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
1967 (((u64)resp->image_addr_high <<
1968 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
1969 *image_size = resp->image_size;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301970
1971 return ret;
1972}
1973
1974/**
1975 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
1976 * @handle: Pointer to TI SCI handle
1977 * @proc_id: Processor ID this request is for
1978 *
1979 * Return: 0 if all went well, else returns appropriate error value.
1980 */
1981static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
1982 u8 proc_id, u64 *bv, u32 *cfg_flags,
1983 u32 *ctrl_flags, u32 *sts_flags)
1984{
1985 struct ti_sci_msg_resp_get_proc_boot_status *resp;
1986 struct ti_sci_msg_req_get_proc_boot_status req;
1987 struct ti_sci_info *info;
1988 struct ti_sci_xfer *xfer;
1989 int ret = 0;
1990
1991 if (IS_ERR(handle))
1992 return PTR_ERR(handle);
1993 if (!handle)
1994 return -EINVAL;
1995
1996 info = handle_to_ti_sci_info(handle);
1997
1998 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
1999 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2000 (u32 *)&req, sizeof(req), sizeof(*resp));
2001 if (IS_ERR(xfer)) {
2002 ret = PTR_ERR(xfer);
2003 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2004 return ret;
2005 }
2006 req.processor_id = proc_id;
2007
2008 ret = ti_sci_do_xfer(info, xfer);
2009 if (ret) {
2010 dev_err(info->dev, "Mbox send fail %d\n", ret);
2011 return ret;
2012 }
2013
2014 resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
2015 xfer->tx_message.buf;
2016
2017 if (!ti_sci_is_response_ack(resp))
2018 return -ENODEV;
2019 *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
2020 (((u64)resp->bootvector_high <<
2021 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2022 *cfg_flags = resp->config_flags;
2023 *ctrl_flags = resp->control_flags;
2024 *sts_flags = resp->status_flags;
2025
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302026 return ret;
2027}
2028
2029/**
2030 * ti_sci_cmd_ring_config() - configure RA ring
2031 * @handle: pointer to TI SCI handle
2032 * @valid_params: Bitfield defining validity of ring configuration parameters.
2033 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2034 * @index: Ring index.
2035 * @addr_lo: The ring base address lo 32 bits
2036 * @addr_hi: The ring base address hi 32 bits
2037 * @count: Number of ring elements.
2038 * @mode: The mode of the ring
2039 * @size: The ring element size.
2040 * @order_id: Specifies the ring's bus order ID.
2041 *
2042 * Return: 0 if all went well, else returns appropriate error value.
2043 *
2044 * See @ti_sci_msg_rm_ring_cfg_req for more info.
2045 */
2046static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2047 u32 valid_params, u16 nav_id, u16 index,
2048 u32 addr_lo, u32 addr_hi, u32 count,
2049 u8 mode, u8 size, u8 order_id)
2050{
2051 struct ti_sci_msg_rm_ring_cfg_resp *resp;
2052 struct ti_sci_msg_rm_ring_cfg_req req;
2053 struct ti_sci_xfer *xfer;
2054 struct ti_sci_info *info;
2055 int ret = 0;
2056
2057 if (IS_ERR(handle))
2058 return PTR_ERR(handle);
2059 if (!handle)
2060 return -EINVAL;
2061
2062 info = handle_to_ti_sci_info(handle);
2063
2064 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2065 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2066 (u32 *)&req, sizeof(req), sizeof(*resp));
2067 if (IS_ERR(xfer)) {
2068 ret = PTR_ERR(xfer);
2069 dev_err(info->dev, "RM_RA:Message config failed(%d)\n", ret);
2070 return ret;
2071 }
2072 req.valid_params = valid_params;
2073 req.nav_id = nav_id;
2074 req.index = index;
2075 req.addr_lo = addr_lo;
2076 req.addr_hi = addr_hi;
2077 req.count = count;
2078 req.mode = mode;
2079 req.size = size;
2080 req.order_id = order_id;
2081
2082 ret = ti_sci_do_xfer(info, xfer);
2083 if (ret) {
2084 dev_err(info->dev, "RM_RA:Mbox config send fail %d\n", ret);
2085 goto fail;
2086 }
2087
2088 resp = (struct ti_sci_msg_rm_ring_cfg_resp *)xfer->tx_message.buf;
2089
2090 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2091
2092fail:
2093 dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2094 return ret;
2095}
2096
2097/**
2098 * ti_sci_cmd_ring_get_config() - get RA ring configuration
2099 * @handle: pointer to TI SCI handle
2100 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2101 * @index: Ring index.
2102 * @addr_lo: returns ring's base address lo 32 bits
2103 * @addr_hi: returns ring's base address hi 32 bits
2104 * @count: returns number of ring elements.
2105 * @mode: returns mode of the ring
2106 * @size: returns ring element size.
2107 * @order_id: returns ring's bus order ID.
2108 *
2109 * Return: 0 if all went well, else returns appropriate error value.
2110 *
2111 * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
2112 */
2113static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2114 u32 nav_id, u32 index, u8 *mode,
2115 u32 *addr_lo, u32 *addr_hi,
2116 u32 *count, u8 *size, u8 *order_id)
2117{
2118 struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2119 struct ti_sci_msg_rm_ring_get_cfg_req req;
2120 struct ti_sci_xfer *xfer;
2121 struct ti_sci_info *info;
2122 int ret = 0;
2123
2124 if (IS_ERR(handle))
2125 return PTR_ERR(handle);
2126 if (!handle)
2127 return -EINVAL;
2128
2129 info = handle_to_ti_sci_info(handle);
2130
2131 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2132 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2133 (u32 *)&req, sizeof(req), sizeof(*resp));
2134 if (IS_ERR(xfer)) {
2135 ret = PTR_ERR(xfer);
2136 dev_err(info->dev,
2137 "RM_RA:Message get config failed(%d)\n", ret);
2138 return ret;
2139 }
2140 req.nav_id = nav_id;
2141 req.index = index;
2142
2143 ret = ti_sci_do_xfer(info, xfer);
2144 if (ret) {
2145 dev_err(info->dev, "RM_RA:Mbox get config send fail %d\n", ret);
2146 goto fail;
2147 }
2148
2149 resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->tx_message.buf;
2150
2151 if (!ti_sci_is_response_ack(resp)) {
2152 ret = -ENODEV;
2153 } else {
2154 if (mode)
2155 *mode = resp->mode;
2156 if (addr_lo)
2157 *addr_lo = resp->addr_lo;
2158 if (addr_hi)
2159 *addr_hi = resp->addr_hi;
2160 if (count)
2161 *count = resp->count;
2162 if (size)
2163 *size = resp->size;
2164 if (order_id)
2165 *order_id = resp->order_id;
2166 };
2167
2168fail:
2169 dev_dbg(info->dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2170 return ret;
2171}
2172
2173static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2174 u32 nav_id, u32 src_thread, u32 dst_thread)
2175{
2176 struct ti_sci_msg_hdr *resp;
2177 struct ti_sci_msg_psil_pair req;
2178 struct ti_sci_xfer *xfer;
2179 struct ti_sci_info *info;
2180 int ret = 0;
2181
2182 if (IS_ERR(handle))
2183 return PTR_ERR(handle);
2184 if (!handle)
2185 return -EINVAL;
2186
2187 info = handle_to_ti_sci_info(handle);
2188
2189 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2190 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2191 (u32 *)&req, sizeof(req), sizeof(*resp));
2192 if (IS_ERR(xfer)) {
2193 ret = PTR_ERR(xfer);
2194 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2195 return ret;
2196 }
2197 req.nav_id = nav_id;
2198 req.src_thread = src_thread;
2199 req.dst_thread = dst_thread;
2200
2201 ret = ti_sci_do_xfer(info, xfer);
2202 if (ret) {
2203 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2204 goto fail;
2205 }
2206
2207 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2208 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2209
2210fail:
2211 dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%u\n",
2212 nav_id, src_thread, dst_thread, ret);
2213 return ret;
2214}
2215
2216static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2217 u32 nav_id, u32 src_thread, u32 dst_thread)
2218{
2219 struct ti_sci_msg_hdr *resp;
2220 struct ti_sci_msg_psil_unpair req;
2221 struct ti_sci_xfer *xfer;
2222 struct ti_sci_info *info;
2223 int ret = 0;
2224
2225 if (IS_ERR(handle))
2226 return PTR_ERR(handle);
2227 if (!handle)
2228 return -EINVAL;
2229
2230 info = handle_to_ti_sci_info(handle);
2231
2232 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2233 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2234 (u32 *)&req, sizeof(req), sizeof(*resp));
2235 if (IS_ERR(xfer)) {
2236 ret = PTR_ERR(xfer);
2237 dev_err(info->dev, "RM_PSIL:Message alloc failed(%d)\n", ret);
2238 return ret;
2239 }
2240 req.nav_id = nav_id;
2241 req.src_thread = src_thread;
2242 req.dst_thread = dst_thread;
2243
2244 ret = ti_sci_do_xfer(info, xfer);
2245 if (ret) {
2246 dev_err(info->dev, "RM_PSIL:Mbox send fail %d\n", ret);
2247 goto fail;
2248 }
2249
2250 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2251 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2252
2253fail:
2254 dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%u\n",
2255 src_thread, dst_thread, ret);
2256 return ret;
2257}
2258
2259static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2260 const struct ti_sci_handle *handle,
2261 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2262{
2263 struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2264 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2265 struct ti_sci_xfer *xfer;
2266 struct ti_sci_info *info;
2267 int ret = 0;
2268
2269 if (IS_ERR(handle))
2270 return PTR_ERR(handle);
2271 if (!handle)
2272 return -EINVAL;
2273
2274 info = handle_to_ti_sci_info(handle);
2275
2276 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2277 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2278 (u32 *)&req, sizeof(req), sizeof(*resp));
2279 if (IS_ERR(xfer)) {
2280 ret = PTR_ERR(xfer);
2281 dev_err(info->dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2282 return ret;
2283 }
2284 req.valid_params = params->valid_params;
2285 req.nav_id = params->nav_id;
2286 req.index = params->index;
2287 req.tx_pause_on_err = params->tx_pause_on_err;
2288 req.tx_filt_einfo = params->tx_filt_einfo;
2289 req.tx_filt_pswords = params->tx_filt_pswords;
2290 req.tx_atype = params->tx_atype;
2291 req.tx_chan_type = params->tx_chan_type;
2292 req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2293 req.tx_fetch_size = params->tx_fetch_size;
2294 req.tx_credit_count = params->tx_credit_count;
2295 req.txcq_qnum = params->txcq_qnum;
2296 req.tx_priority = params->tx_priority;
2297 req.tx_qos = params->tx_qos;
2298 req.tx_orderid = params->tx_orderid;
2299 req.fdepth = params->fdepth;
2300 req.tx_sched_priority = params->tx_sched_priority;
2301
2302 ret = ti_sci_do_xfer(info, xfer);
2303 if (ret) {
2304 dev_err(info->dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2305 goto fail;
2306 }
2307
2308 resp =
2309 (struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *)xfer->tx_message.buf;
2310 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2311
2312fail:
2313 dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2314 return ret;
2315}
2316
/**
 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP receive channel
 * @handle:	pointer to TI SCI handle
 * @params:	receive channel configuration parameters
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
			const struct ti_sci_handle *handle,
			const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
{
	struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(info->dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
		return ret;
	}

	/* Marshal the caller's parameter block into the request payload */
	req.valid_params = params->valid_params;
	req.nav_id = params->nav_id;
	req.index = params->index;
	req.rx_fetch_size = params->rx_fetch_size;
	req.rxcq_qnum = params->rxcq_qnum;
	req.rx_priority = params->rx_priority;
	req.rx_qos = params->rx_qos;
	req.rx_orderid = params->rx_orderid;
	req.rx_sched_priority = params->rx_sched_priority;
	req.flowid_start = params->flowid_start;
	req.flowid_cnt = params->flowid_cnt;
	req.rx_pause_on_err = params->rx_pause_on_err;
	req.rx_atype = params->rx_atype;
	req.rx_chan_type = params->rx_chan_type;
	req.rx_ignore_short = params->rx_ignore_short;
	req.rx_ignore_long = params->rx_ignore_long;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret) {
		dev_err(info->dev, "Mbox send RX_CH_CFG fail %d\n", ret);
		goto fail;
	}

	/* Response is read back from the buffer used for the exchange */
	resp =
	      (struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *)xfer->tx_message.buf;
	ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;

fail:
	dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
	return ret;
}
2374
2375static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2376 const struct ti_sci_handle *handle,
2377 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2378{
2379 struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2380 struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2381 struct ti_sci_xfer *xfer;
2382 struct ti_sci_info *info;
2383 int ret = 0;
2384
2385 if (IS_ERR(handle))
2386 return PTR_ERR(handle);
2387 if (!handle)
2388 return -EINVAL;
2389
2390 info = handle_to_ti_sci_info(handle);
2391
2392 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2393 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2394 (u32 *)&req, sizeof(req), sizeof(*resp));
2395 if (IS_ERR(xfer)) {
2396 ret = PTR_ERR(xfer);
2397 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2398 return ret;
2399 }
2400
2401 req.valid_params = params->valid_params;
2402 req.nav_id = params->nav_id;
2403 req.flow_index = params->flow_index;
2404 req.rx_einfo_present = params->rx_einfo_present;
2405 req.rx_psinfo_present = params->rx_psinfo_present;
2406 req.rx_error_handling = params->rx_error_handling;
2407 req.rx_desc_type = params->rx_desc_type;
2408 req.rx_sop_offset = params->rx_sop_offset;
2409 req.rx_dest_qnum = params->rx_dest_qnum;
2410 req.rx_src_tag_hi = params->rx_src_tag_hi;
2411 req.rx_src_tag_lo = params->rx_src_tag_lo;
2412 req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2413 req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2414 req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2415 req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2416 req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2417 req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2418 req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2419 req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2420 req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2421 req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2422 req.rx_ps_location = params->rx_ps_location;
2423
2424 ret = ti_sci_do_xfer(info, xfer);
2425 if (ret) {
2426 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2427 goto fail;
2428 }
2429
2430 resp =
2431 (struct ti_sci_msg_rm_udmap_flow_cfg_resp *)xfer->tx_message.buf;
2432 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2433
2434fail:
2435 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302436 return ret;
2437}
2438
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002439/**
2440 * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
2441 * @handle: pointer to TI SCI handle
2442 * @region: region configuration parameters
2443 *
2444 * Return: 0 if all went well, else returns appropriate error value.
2445 */
2446static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
2447 const struct ti_sci_msg_fwl_region *region)
2448{
2449 struct ti_sci_msg_fwl_set_firewall_region_req req;
2450 struct ti_sci_msg_hdr *resp;
2451 struct ti_sci_info *info;
2452 struct ti_sci_xfer *xfer;
2453 int ret = 0;
2454
2455 if (IS_ERR(handle))
2456 return PTR_ERR(handle);
2457 if (!handle)
2458 return -EINVAL;
2459
2460 info = handle_to_ti_sci_info(handle);
2461
2462 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
2463 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2464 (u32 *)&req, sizeof(req), sizeof(*resp));
2465 if (IS_ERR(xfer)) {
2466 ret = PTR_ERR(xfer);
2467 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2468 return ret;
2469 }
2470
2471 req.fwl_id = region->fwl_id;
2472 req.region = region->region;
2473 req.n_permission_regs = region->n_permission_regs;
2474 req.control = region->control;
2475 req.permissions[0] = region->permissions[0];
2476 req.permissions[1] = region->permissions[1];
2477 req.permissions[2] = region->permissions[2];
2478 req.start_address = region->start_address;
2479 req.end_address = region->end_address;
2480
2481 ret = ti_sci_do_xfer(info, xfer);
2482 if (ret) {
2483 dev_err(info->dev, "Mbox send fail %d\n", ret);
2484 return ret;
2485 }
2486
2487 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2488
2489 if (!ti_sci_is_response_ack(resp))
2490 return -ENODEV;
2491
2492 return 0;
2493}
2494
2495/**
2496 * ti_sci_cmd_get_fwl_region() - Request for getting a firewall region
2497 * @handle: pointer to TI SCI handle
2498 * @region: region configuration parameters
2499 *
2500 * Return: 0 if all went well, else returns appropriate error value.
2501 */
2502static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
2503 struct ti_sci_msg_fwl_region *region)
2504{
2505 struct ti_sci_msg_fwl_get_firewall_region_req req;
2506 struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
2507 struct ti_sci_info *info;
2508 struct ti_sci_xfer *xfer;
2509 int ret = 0;
2510
2511 if (IS_ERR(handle))
2512 return PTR_ERR(handle);
2513 if (!handle)
2514 return -EINVAL;
2515
2516 info = handle_to_ti_sci_info(handle);
2517
2518 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
2519 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2520 (u32 *)&req, sizeof(req), sizeof(*resp));
2521 if (IS_ERR(xfer)) {
2522 ret = PTR_ERR(xfer);
2523 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2524 return ret;
2525 }
2526
2527 req.fwl_id = region->fwl_id;
2528 req.region = region->region;
2529 req.n_permission_regs = region->n_permission_regs;
2530
2531 ret = ti_sci_do_xfer(info, xfer);
2532 if (ret) {
2533 dev_err(info->dev, "Mbox send fail %d\n", ret);
2534 return ret;
2535 }
2536
2537 resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;
2538
2539 if (!ti_sci_is_response_ack(resp))
2540 return -ENODEV;
2541
2542 region->fwl_id = resp->fwl_id;
2543 region->region = resp->region;
2544 region->n_permission_regs = resp->n_permission_regs;
2545 region->control = resp->control;
2546 region->permissions[0] = resp->permissions[0];
2547 region->permissions[1] = resp->permissions[1];
2548 region->permissions[2] = resp->permissions[2];
2549 region->start_address = resp->start_address;
2550 region->end_address = resp->end_address;
2551
2552 return 0;
2553}
2554
2555/**
2556 * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
2557 * @handle: pointer to TI SCI handle
2558 * @region: region configuration parameters
2559 *
2560 * Return: 0 if all went well, else returns appropriate error value.
2561 */
2562static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
2563 struct ti_sci_msg_fwl_owner *owner)
2564{
2565 struct ti_sci_msg_fwl_change_owner_info_req req;
2566 struct ti_sci_msg_fwl_change_owner_info_resp *resp;
2567 struct ti_sci_info *info;
2568 struct ti_sci_xfer *xfer;
2569 int ret = 0;
2570
2571 if (IS_ERR(handle))
2572 return PTR_ERR(handle);
2573 if (!handle)
2574 return -EINVAL;
2575
2576 info = handle_to_ti_sci_info(handle);
2577
Andrew F. Davis8928fbd2019-04-29 09:04:11 -04002578 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
2579 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002580 (u32 *)&req, sizeof(req), sizeof(*resp));
2581 if (IS_ERR(xfer)) {
2582 ret = PTR_ERR(xfer);
2583 dev_err(info->dev, "Message alloc failed(%d)\n", ret);
2584 return ret;
2585 }
2586
2587 req.fwl_id = owner->fwl_id;
2588 req.region = owner->region;
2589 req.owner_index = owner->owner_index;
2590
2591 ret = ti_sci_do_xfer(info, xfer);
2592 if (ret) {
2593 dev_err(info->dev, "Mbox send fail %d\n", ret);
2594 return ret;
2595 }
2596
2597 resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;
2598
2599 if (!ti_sci_is_response_ack(resp))
2600 return -ENODEV;
2601
2602 owner->fwl_id = resp->fwl_id;
2603 owner->region = resp->region;
2604 owner->owner_index = resp->owner_index;
2605 owner->owner_privid = resp->owner_privid;
2606 owner->owner_permission_bits = resp->owner_permission_bits;
2607
2608 return ret;
2609}
2610
/**
 * ti_sci_setup_ops() - Setup the operations structures
 * @info:	pointer to TISCI instance whose handle ops are populated
 *
 * Wires every ops table inside info->handle.ops to the command
 * implementations defined in this file.
 */
static void ti_sci_setup_ops(struct ti_sci_info *info)
{
	struct ti_sci_ops *ops = &info->handle.ops;
	struct ti_sci_board_ops *bops = &ops->board_ops;
	struct ti_sci_dev_ops *dops = &ops->dev_ops;
	struct ti_sci_clk_ops *cops = &ops->clk_ops;
	struct ti_sci_core_ops *core_ops = &ops->core_ops;
	struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
	struct ti_sci_proc_ops *pops = &ops->proc_ops;
	struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
	struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
	struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
	struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;

	/* Board configuration operations */
	bops->board_config = ti_sci_cmd_set_board_config;
	bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
	bops->board_config_security = ti_sci_cmd_set_board_config_security;
	bops->board_config_pm = ti_sci_cmd_set_board_config_pm;

	/* Device power/state management operations */
	dops->get_device = ti_sci_cmd_get_device;
	dops->idle_device = ti_sci_cmd_idle_device;
	dops->put_device = ti_sci_cmd_put_device;
	dops->is_valid = ti_sci_cmd_dev_is_valid;
	dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
	dops->is_idle = ti_sci_cmd_dev_is_idle;
	dops->is_stop = ti_sci_cmd_dev_is_stop;
	dops->is_on = ti_sci_cmd_dev_is_on;
	dops->is_transitioning = ti_sci_cmd_dev_is_trans;
	dops->set_device_resets = ti_sci_cmd_set_device_resets;
	dops->get_device_resets = ti_sci_cmd_get_device_resets;

	/* Clock management operations */
	cops->get_clock = ti_sci_cmd_get_clock;
	cops->idle_clock = ti_sci_cmd_idle_clock;
	cops->put_clock = ti_sci_cmd_put_clock;
	cops->is_auto = ti_sci_cmd_clk_is_auto;
	cops->is_on = ti_sci_cmd_clk_is_on;
	cops->is_off = ti_sci_cmd_clk_is_off;

	cops->set_parent = ti_sci_cmd_clk_set_parent;
	cops->get_parent = ti_sci_cmd_clk_get_parent;
	cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;

	cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
	cops->set_freq = ti_sci_cmd_clk_set_freq;
	cops->get_freq = ti_sci_cmd_clk_get_freq;

	/* Core/system operations */
	core_ops->reboot_device = ti_sci_cmd_core_reboot;
	core_ops->query_msmc = ti_sci_cmd_query_msmc;

	/* Resource management core operations */
	rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
	rm_core_ops->get_range_from_shost =
				ti_sci_cmd_get_resource_range_from_shost;

	/* Processor boot control operations */
	pops->proc_request = ti_sci_cmd_proc_request;
	pops->proc_release = ti_sci_cmd_proc_release;
	pops->proc_handover = ti_sci_cmd_proc_handover;
	pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
	pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
	pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
	pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;

	/* Ring accelerator, PSI-L and UDMAP resource-management operations */
	rops->config = ti_sci_cmd_ring_config;
	rops->get_config = ti_sci_cmd_ring_get_config;

	psilops->pair = ti_sci_cmd_rm_psil_pair;
	psilops->unpair = ti_sci_cmd_rm_psil_unpair;

	udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
	udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
	udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;

	/* Firewall configuration operations */
	fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
	fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
	fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
}
2690
2691/**
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302692 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
2693 * @dev: Pointer to the SYSFW device
2694 *
2695 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2696 * are encountered.
2697 */
2698const
2699struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2700{
2701 if (!sci_dev)
2702 return ERR_PTR(-EINVAL);
2703
2704 struct ti_sci_info *info = dev_get_priv(sci_dev);
2705
2706 if (!info)
2707 return ERR_PTR(-EINVAL);
2708
2709 struct ti_sci_handle *handle = &info->handle;
2710
2711 if (!handle)
2712 return ERR_PTR(-EINVAL);
2713
2714 return handle;
2715}
2716
2717/**
2718 * ti_sci_get_handle() - Get the TI SCI handle for a device
2719 * @dev: Pointer to device for which we want SCI handle
2720 *
2721 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2722 * are encountered.
2723 */
2724const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2725{
2726 if (!dev)
2727 return ERR_PTR(-EINVAL);
2728
2729 struct udevice *sci_dev = dev_get_parent(dev);
2730
2731 return ti_sci_get_handle_from_sysfw(sci_dev);
2732}
2733
2734/**
2735 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2736 * @dev: device node
2737 * @propname: property name containing phandle on TISCI node
2738 *
2739 * Return: pointer to handle if successful, else appropriate error value.
2740 */
2741const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2742 const char *property)
2743{
2744 struct ti_sci_info *entry, *info = NULL;
2745 u32 phandle, err;
2746 ofnode node;
2747
2748 err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2749 if (err)
2750 return ERR_PTR(err);
2751
2752 node = ofnode_get_by_phandle(phandle);
2753 if (!ofnode_valid(node))
2754 return ERR_PTR(-EINVAL);
2755
2756 list_for_each_entry(entry, &ti_sci_list, list)
2757 if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2758 info = entry;
2759 break;
2760 }
2761
2762 if (!info)
2763 return ERR_PTR(-ENODEV);
2764
2765 return &info->handle;
2766}
2767
/**
 * ti_sci_of_to_info() - generate private data from device tree
 * @dev:	corresponding system controller interface device
 * @info:	pointer to driver specific private data
 *
 * Return: 0 if all goes good, else appropriate error message.
 */
static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
{
	int ret;

	/* Channel used to transmit requests to system firmware */
	ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
	if (ret) {
		dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* Channel on which responses are received */
	ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
	if (ret) {
		dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* Notify channel is optional. Enable only if populated */
	ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
	if (ret) {
		dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
			__func__, ret);
	}

	/* Fall back to the SoC descriptor's default host ID if not in DT */
	info->host_id = dev_read_u32_default(dev, "ti,host-id",
					     info->desc->default_host_id);

	info->is_secure = dev_read_bool(dev, "ti,secure-host");

	return 0;
}
2807
/**
 * ti_sci_probe() - Basic probe
 * @dev:	corresponding system controller interface device
 *
 * Return: 0 if all goes good, else appropriate error message.
 */
static int ti_sci_probe(struct udevice *dev)
{
	struct ti_sci_info *info;
	int ret;

	debug("%s(dev=%p)\n", __func__, dev);

	info = dev_get_priv(dev);
	/* Per-SoC description attached as driver data in ti_sci_ids[] */
	info->desc = (void *)dev_get_driver_data(dev);

	ret = ti_sci_of_to_info(dev, info);
	if (ret) {
		dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
		return ret;
	}

	info->dev = dev;
	/* Initial message sequence number */
	info->seq = 0xA;

	/* Register instance so ti_sci_get_by_phandle() can find it */
	list_add_tail(&info->list, &ti_sci_list);
	ti_sci_setup_ops(info);

	/* Query firmware revision; also exercises the message channel */
	ret = ti_sci_cmd_get_revision(&info->handle);

	return ret;
}
2840
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302841/*
2842 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
2843 * @res: Pointer to the TISCI resource
2844 *
2845 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
2846 */
2847u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
2848{
2849 u16 set, free_bit;
2850
2851 for (set = 0; set < res->sets; set++) {
2852 free_bit = find_first_zero_bit(res->desc[set].res_map,
2853 res->desc[set].num);
2854 if (free_bit != res->desc[set].num) {
2855 set_bit(free_bit, res->desc[set].res_map);
2856 return res->desc[set].start + free_bit;
2857 }
2858 }
2859
2860 return TI_SCI_RESOURCE_NULL;
2861}
2862
2863/**
2864 * ti_sci_release_resource() - Release a resource from TISCI resource.
2865 * @res: Pointer to the TISCI resource
2866 */
2867void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
2868{
2869 u16 set;
2870
2871 for (set = 0; set < res->sets; set++) {
2872 if (res->desc[set].start <= id &&
2873 (res->desc[set].num + res->desc[set].start) > id)
2874 clear_bit(id - res->desc[set].start,
2875 res->desc[set].res_map);
2876 }
2877}
2878
2879/**
2880 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
2881 * @handle: TISCI handle
2882 * @dev: Device pointer to which the resource is assigned
2883 * @of_prop: property name by which the resource are represented
2884 *
2885 * Note: This function expects of_prop to be in the form of tuples
2886 * <type, subtype>. Allocates and initializes ti_sci_resource structure
2887 * for each of_prop. Client driver can directly call
2888 * ti_sci_(get_free, release)_resource apis for handling the resource.
2889 *
2890 * Return: Pointer to ti_sci_resource if all went well else appropriate
2891 * error pointer.
2892 */
2893struct ti_sci_resource *
2894devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2895 struct udevice *dev, u32 dev_id, char *of_prop)
2896{
2897 u32 resource_subtype;
2898 u16 resource_type;
2899 struct ti_sci_resource *res;
2900 int sets, i, ret;
2901 u32 *temp;
2902
2903 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
2904 if (!res)
2905 return ERR_PTR(-ENOMEM);
2906
2907 sets = dev_read_size(dev, of_prop);
2908 if (sets < 0) {
2909 dev_err(dev, "%s resource type ids not available\n", of_prop);
2910 return ERR_PTR(sets);
2911 }
2912 temp = malloc(sets);
2913 sets /= sizeof(u32);
2914 res->sets = sets;
2915
2916 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
2917 GFP_KERNEL);
2918 if (!res->desc)
2919 return ERR_PTR(-ENOMEM);
2920
2921 ret = ti_sci_get_resource_type(handle_to_ti_sci_info(handle), dev_id,
2922 &resource_type);
2923 if (ret) {
2924 dev_err(dev, "No valid resource type for %u\n", dev_id);
2925 return ERR_PTR(-EINVAL);
2926 }
2927
2928 ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
2929 if (ret)
2930 return ERR_PTR(-EINVAL);
2931
2932 for (i = 0; i < res->sets; i++) {
2933 resource_subtype = temp[i];
2934 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
2935 resource_subtype,
2936 &res->desc[i].start,
2937 &res->desc[i].num);
2938 if (ret) {
2939 dev_err(dev, "type %d subtype %d not allocated for host %d\n",
2940 resource_type, resource_subtype,
2941 handle_to_ti_sci_info(handle)->host_id);
2942 return ERR_PTR(ret);
2943 }
2944
2945 dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
2946 resource_type, resource_subtype, res->desc[i].start,
2947 res->desc[i].num);
2948
2949 res->desc[i].res_map =
2950 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
2951 sizeof(*res->desc[i].res_map), GFP_KERNEL);
2952 if (!res->desc[i].res_map)
2953 return ERR_PTR(-ENOMEM);
2954 }
2955
2956 return res;
2957}
2958
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	/* Host ID used when DT does not provide "ti,host-id" */
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 64,
	/* K2G does not use the AM654 RM dev_id->type workaround table */
	.rm_type_map = NULL,
};
2969
/*
 * AM654 mapping of TISCI device IDs to RM "type" values used by the
 * resource range APIs (see struct ti_sci_rm_type_map); terminated by
 * the zero dev_id entry.
 */
static struct ti_sci_rm_type_map ti_sci_am654_rm_type_map[] = {
	{.dev_id = 56, .type = 0x00b}, /* GIC_IRQ */
	{.dev_id = 179, .type = 0x000}, /* MAIN_NAV_UDMASS_IA0 */
	{.dev_id = 187, .type = 0x009}, /* MAIN_NAV_RA */
	{.dev_id = 188, .type = 0x006}, /* MAIN_NAV_UDMAP */
	{.dev_id = 194, .type = 0x007}, /* MCU_NAV_UDMAP */
	{.dev_id = 195, .type = 0x00a}, /* MCU_NAV_RA */
	{.dev_id = 0, .type = 0x000}, /* end of table */
};
2979
/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	/* Host ID used when DT does not provide "ti,host-id" */
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle upto 128 messages! */
	.max_msgs = 20,
	.max_msg_size = 60,
	/* dev_id->type translation table for the RM range APIs */
	.rm_type_map = ti_sci_am654_rm_type_map,
};
2990
/* Devicetree match table; .data carries the per-SoC descriptor */
static const struct udevice_id ti_sci_ids[] = {
	{
		.compatible = "ti,k2g-sci",
		.data = (ulong)&ti_sci_pmmc_k2g_desc
	},
	{
		.compatible = "ti,am654-sci",
		.data = (ulong)&ti_sci_pmmc_am654_desc
	},
	{ /* Sentinel */ },
};
3002
/* U-Boot driver-model registration for the TISCI firmware driver */
U_BOOT_DRIVER(ti_sci) = {
	.name = "ti_sci",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_ids,
	.probe = ti_sci_probe,
	/* Driver-model auto-allocates per-device struct ti_sci_info */
	.priv_auto_alloc_size = sizeof(struct ti_sci_info),
};