// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments System Control Interface Protocol Driver
 * Based on drivers/firmware/ti_sci.c from Linux.
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <dm.h>
#include <errno.h>
#include <log.h>
#include <mailbox.h>
#include <malloc.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/lists.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-sec-proxy.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "ti_sci.h"
#include "ti_sci_static_data.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	u8 rx_len;
};

/**
 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
 *			       management representation of dev_ids.
 * @dev_id:	TISCI device ID
 * @type:	Corresponding id as identified by TISCI RM.
 *
 * Note: This is used only as a workaround for using RM range APIs
 *	 for AM654 SoC. For future SoCs dev_id will be used as type
 *	 for RM range APIs. In order to maintain ABI backward compatibility
 *	 type is not being changed for AM654 SoC.
 */
struct ti_sci_rm_type_map {
	u32 dev_id;
	u16 type;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @handle:	Instance of TI SCI handle to send to clients.
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @chan_notify: Notification mailbox channel
 * @xfer:	xfer info
 * @list:	list head
 * @dev_list:	List of exclusively requested devices held by this instance
 * @is_secure:	Determines if the communication is through secure threads.
 * @host_id:	Host identifier representing the compute entity
 * @seq:	Sequence ID used to match TX requests with their RX responses.
 */
struct ti_sci_info {
	struct udevice *dev;
	const struct ti_sci_desc *desc;
	struct ti_sci_handle handle;
	struct mbox_chan chan_tx;
	struct mbox_chan chan_rx;
	struct mbox_chan chan_notify;
	struct ti_sci_xfer xfer;
	struct list_head list;
	struct list_head dev_list;
	bool is_secure;
	u8 host_id;
	u8 seq;
};

struct ti_sci_exclusive_dev {
	u32 id;
	u32 count;
	struct list_head list;
};

#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)

/**
 * ti_sci_setup_one_xfer() - Setup one message type
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @buf:	Buffer to be sent to mailbox channel
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size. May be set to zero for send-only
 *		     transactions.
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Return: Corresponding ti_sci_xfer pointer if all went fine,
 *	   else appropriate error pointer.
 */
static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
						 u16 msg_type, u32 msg_flags,
						 u32 *buf,
						 size_t tx_message_size,
						 size_t rx_message_size)
{
	struct ti_sci_xfer *xfer = &info->xfer;
	struct ti_sci_msg_hdr *hdr;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    (rx_message_size > 0 && rx_message_size < sizeof(*hdr)) ||
	    tx_message_size < sizeof(*hdr)) {
		dev_err(info->dev, "TI-SCI message transfer size not sane\n");
		return ERR_PTR(-ERANGE);
	}

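	/*
	 * Alternate the per-instance sequence ID; ti_sci_get_response()
	 * compares hdr->seq against it to pair the reply with this request.
	 */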
	info->seq = ~info->seq;
	xfer->tx_message.buf = buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	hdr = (struct ti_sci_msg_hdr *)buf;
	hdr->seq = info->seq;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_get_response() - Receive response from mailbox channel
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 * @chan:	Channel to receive the response
 *
 * Return: -ETIMEDOUT if no response arrives in time, the corresponding
 *	   error code if the receive fails, else 0 when all goes well.
 */
static int ti_sci_get_response(struct ti_sci_info *info,
			       struct ti_sci_xfer *xfer,
			       struct mbox_chan *chan)
{
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	struct ti_sci_secure_msg_hdr *secure_hdr;
	struct ti_sci_msg_hdr *hdr;
	int ret;

	/* Receive the response */
	ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
	if (ret) {
		dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* ToDo: Verify checksum */
	if (info->is_secure) {
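		/*
		 * Responses on the secure channel carry an extra secure
		 * header; skip past it so the TISCI header can be parsed.
		 */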
		secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
		msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
	}

	/* msg is updated by mailbox driver */
	hdr = (struct ti_sci_msg_hdr *)msg->buf;

	/* Sanity check for message response */
	if (hdr->seq != info->seq) {
		dev_dbg(info->dev, "%s: Message for %d is not expected\n",
			__func__, hdr->seq);
		return ret;
	}

	if (msg->len > info->desc->max_msg_size) {
		dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
			__func__, msg->len, info->desc->max_msg_size);
		return -EINVAL;
	}

	if (msg->len < xfer->rx_len) {
		dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
			__func__, msg->len, xfer->rx_len);
	}

	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_do_xfer(struct ti_sci_info *info,
			  struct ti_sci_xfer *xfer)
{
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	u8 secure_buf[info->desc->max_msg_size];
	struct ti_sci_secure_msg_hdr *secure_hdr = (struct ti_sci_secure_msg_hdr *)secure_buf;
	int ret;

	/*
	 * The reason why we need the is_secure code is because of boot R5.
	 * boot R5 starts off in "secure mode" when it hands off from Boot
	 * ROM over to the Secondary bootloader. The initial set of calls
	 * we have to make need to be on a secure pipe.
	 */
	if (info->is_secure) {
		/* ToDo: get checksum of the entire message */
		secure_hdr->checksum = 0;
		secure_hdr->reserved = 0;
		memcpy(&secure_buf[sizeof(*secure_hdr)], xfer->tx_message.buf,
		       xfer->tx_message.len);

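		/*
		 * Send from the local staging buffer and account for the
		 * prepended secure header in both the transmit length and
		 * the expected receive length.
		 */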
		xfer->tx_message.buf = (u32 *)secure_buf;
		xfer->tx_message.len += sizeof(*secure_hdr);

		if (xfer->rx_len)
			xfer->rx_len += sizeof(*secure_hdr);
	}

	/* Send the message */
	ret = mbox_send(&info->chan_tx, msg);
	if (ret) {
		dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* Get response if requested */
	if (xfer->rx_len) {
		ret = ti_sci_get_response(info, xfer, &info->chan_rx);
		if (!ti_sci_is_response_ack(xfer->tx_message.buf)) {
			dev_err(info->dev, "Message not acknowledged\n");
			ret = -ENODEV;
		}
	}

	return ret;
}

/**
 * ti_sci_cmd_query_dm_cap() - Command to query DM firmware's capabilities
 * @handle:	Pointer to TI SCI handle
 * @fw_caps:	Pointer to firmware capabilities
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_query_dm_cap(struct ti_sci_handle *handle, u64 *fw_caps)
{
	struct ti_sci_query_fw_caps_resp *cap_info;
	struct ti_sci_msg_hdr hdr;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_FW_CAPS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
				     sizeof(*cap_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

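	/*
	 * ti_sci_get_response() writes the reply back into the buffer used
	 * for transmission, so the response is read from tx_message.buf.
	 */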
	cap_info = (struct ti_sci_query_fw_caps_resp *)xfer->tx_message.buf;

	*fw_caps = cap_info->fw_caps;

	return 0;
}

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @handle:	pointer to TI SCI handle
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
{
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_version_info *ver;
	struct ti_sci_msg_hdr hdr;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
				     sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;

	ver = &handle->version;
	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

	return 0;
}

/**
 * cmd_set_board_config_using_msg() - Common command to send board configuration
 *				      message
 * @handle:	pointer to TI SCI handle
 * @msg_type:	One of the TISCI message types to set board configuration
 * @addr:	Address where the board config structure is located
 * @size:	Size of the board config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
					  u16 msg_type, u64 addr, u32 size)
{
	struct ti_sci_msg_board_config req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, msg_type,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
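	/* Split the 64-bit board config address into the high/low message fields */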
	req.boardcfgp_high = (addr >> 32) & 0xffffffff;
	req.boardcfgp_low = addr & 0xffffffff;
	req.boardcfg_size = size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_cmd_set_board_config() - Command to send board configuration message
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board config structure is located
 * @size:	Size of the board config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
				       u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG,
					      addr, size);
}

/**
 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
 *				      management configuration
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board RM config structure is located
 * @size:	Size of the RM config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static
int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
				   u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG_RM,
					      addr, size);
}

/**
 * ti_sci_cmd_set_board_config_security() - Command to send board security
 *					    configuration message
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board security config structure is located
 * @size:	Size of the security config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static
int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
					 u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG_SECURITY,
					      addr, size);
}

/**
 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
 *				      configuration message
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board PM config structure is located
 * @size:	Size of the PM config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
					  u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG_PM,
					      addr, size);
}

static struct ti_sci_exclusive_dev
*ti_sci_get_exclusive_dev(struct list_head *dev_list, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	list_for_each_entry(dev, dev_list, list)
		if (dev->id == id)
			return dev;

	return NULL;
}

static void ti_sci_add_exclusive_dev(struct ti_sci_info *info, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
	if (dev) {
		dev->count++;
		return;
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	dev->id = id;
	dev->count = 1;
	INIT_LIST_HEAD(&dev->list);
	list_add_tail(&dev->list, &info->dev_list);
}

static void ti_sci_delete_exclusive_dev(struct ti_sci_info *info, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
	if (!dev)
		return;

	if (dev->count > 0)
		dev->count--;
}

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;
	req.state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

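	/*
	 * Track exclusive requests in the per-instance device list so that
	 * any holds still outstanding can be released later by
	 * ti_sci_cmd_release_exclusive_devices().
	 */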
	if (state == MSG_DEVICE_SW_STATE_AUTO_OFF)
		ti_sci_delete_exclusive_dev(info, id);
	else if (flags & MSG_FLAG_DEVICE_EXCLUSIVE)
		ti_sci_add_exclusive_dev(info, id);

	return ret;
}

/**
 * ti_sci_set_device_state_no_wait() - Set device state helper without
 *				       requesting or waiting for a response.
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state_no_wait(const struct ti_sci_handle *handle,
					   u32 id, u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				     flags | TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
				     (u32 *)&req, sizeof(req), 0);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;
	req.state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_msg_req_get_device_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * NOTE: The request is for exclusive access for the processor.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_AUTO_OFF);
}

static int ti_sci_cmd_release_exclusive_devices(void)
{
	struct ti_sci_exclusive_dev *dev, *tmp;
	struct ti_sci_info *info;
	int i, cnt;

	/*
	 * Scan all ti_sci_list registrations, since with FIT images, we could
	 * have started with one device tree registration and switched over
	 * to a final version. This prevents exclusive devices identified
	 * during the first probe from being left orphaned.
	 */
	list_for_each_entry(info, &ti_sci_list, list) {
		list_for_each_entry_safe(dev, tmp, &info->dev_list, list) {
			cnt = dev->count;
			debug("%s: id = %d, cnt = %d\n", __func__, dev->id, cnt);
			for (i = 0; i < cnt; i++)
				ti_sci_cmd_put_device(&info->handle, dev->id);
		}
	}

	return 0;
}

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 * appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_msg_req_set_device_resets req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;
	req.resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_clock_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state:	State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u8 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_msg_req_get_clock_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u8 clk_id, bool needs_ssc, bool can_change_freq,
				bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u8 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u8 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_AUTO);
}

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u8 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u8 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 clk_id, u8 parent_id)
{
	struct ti_sci_msg_req_set_clock_parent req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.parent_id = parent_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 clk_id, u8 *parent_id)
{
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_msg_req_get_clock_parent req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;

	*parent_id = resp->parent_id;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents to the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u8 clk_id,
					  u8 *num_parents)
{
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_msg_req_get_clock_num_parents req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
	       xfer->tx_message.buf;

	*num_parents = resp->num_parents;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_msg_req_query_clock_freq req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.min_freq_hz = min_freq;
	req.target_freq_hz = target_freq;
	req.max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;

	*match_freq = resp->freq_hz;

	return ret;
}

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u8 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_msg_req_set_clock_freq req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.min_freq_hz = min_freq;
	req.target_freq_hz = target_freq;
	req.max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u8 clk_id, u64 *freq)
{
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_msg_req_get_clock_freq req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;

	*freq = resp->freq_hz;

	return ret;
}

/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_msg_req_reboot req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
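	/* Per the TISCI reset request, domain 0 selects a full SoC reset */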
	req.domain = 0;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. Resource is uniquely identified by
 *			       type and subtype.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 subtype, u8 s_host,
				     u16 *range_start, u16 *range_num)
{
	struct ti_sci_msg_resp_get_resource_range *resp;
	struct ti_sci_msg_req_get_resource_range req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}

	req.secondary_host = s_host;
	req.type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
	req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		goto fail;

	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
Andrew Davis04e43932022-07-25 20:25:06 -05001619 if (!resp->range_start && !resp->range_num) {
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301620 ret = -ENODEV;
1621 } else {
1622 *range_start = resp->range_start;
1623 *range_num = resp->range_num;
1624 	}
1625
1626fail:
1627 return ret;
1628}
1629
Vignesh Raghavendra4214a812021-06-07 19:47:48 +05301630static int __maybe_unused
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05301631ti_sci_cmd_get_resource_range_static(const struct ti_sci_handle *handle,
1632 u32 dev_id, u8 subtype,
1633 u16 *range_start, u16 *range_num)
Vignesh Raghavendra4214a812021-06-07 19:47:48 +05301634{
1635 struct ti_sci_resource_static_data *data;
1636 int i = 0;
1637
1638 while (1) {
1639 data = &rm_static_data[i];
1640
1641 if (!data->dev_id)
1642 return -EINVAL;
1643
1644 if (data->dev_id != dev_id || data->subtype != subtype) {
1645 i++;
1646 continue;
1647 }
1648
1649 *range_start = data->range_start;
1650 *range_num = data->range_num;
1651
1652 return 0;
1653 }
1654
1655 return -EINVAL;
1656}
1657
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301658/**
1659 * ti_sci_cmd_get_resource_range() - Get a range of resources assigned to the host
1660 * that is the same as the TI SCI interface host.
1661 * @handle: Pointer to TISCI handle.
1662 * @dev_id: TISCI device ID.
1663 * @subtype: Resource assignment subtype that is being requested
1664 * from the given device.
1665 * @range_start: Start index of the resource range
1666 * @range_num: Number of resources in the range
1667 *
1668 * Return: 0 if all went fine, else return appropriate error.
1669 */
1670static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1671 u32 dev_id, u8 subtype,
1672 u16 *range_start, u16 *range_num)
1673{
1674 return ti_sci_get_resource_range(handle, dev_id, subtype,
1675 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1676 range_start, range_num);
1677}
1678
1679/**
1680 * ti_sci_cmd_get_resource_range_from_shost() - Get a range of resources
1681 * assigned to a specified host.
1682 * @handle: Pointer to TISCI handle.
1683 * @dev_id: TISCI device ID.
1684 * @subtype: Resource assignment subtype that is being requested
1685 * from the given device.
1686 * @s_host: Host processor ID to which the resources are allocated
1687 * @range_start: Start index of the resource range
1688 * @range_num: Number of resources in the range
1689 *
1690 * Return: 0 if all went fine, else return appropriate error.
1691 */
1692static
1693int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1694 u32 dev_id, u8 subtype, u8 s_host,
1695 u16 *range_start, u16 *range_num)
1696{
1697 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1698 range_start, range_num);
1699}
1700
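/*
 * Usage sketch (illustrative only, not part of this driver): querying the
 * resource range a subtype grants to this host via rm_core_ops. 'dev_id'
 * and 'subtype' are hypothetical placeholders.
 *
 *	u16 start, num;
 *
 *	if (!handle->ops.rm_core_ops.get_range(handle, dev_id, subtype,
 *					       &start, &num))
 *		printf("resources %u..%u\n", start, start + num - 1);
 */
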
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301701/**
Lokesh Vutla032dce82019-03-08 11:47:32 +05301702 * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
1703 * @handle: pointer to TI SCI handle
1704 * @msmc_start: MSMC start address as returned by TISCI
1705 * @msmc_end: MSMC end address as returned by TISCI
1706 *
1707 * Return: 0 if all went well, else returns appropriate error value.
1708 */
1709static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1710 u64 *msmc_start, u64 *msmc_end)
1711{
1712 struct ti_sci_msg_resp_query_msmc *resp;
1713 struct ti_sci_msg_hdr req;
1714 struct ti_sci_info *info;
1715 struct ti_sci_xfer *xfer;
1716 int ret = 0;
1717
1718 if (IS_ERR(handle))
1719 return PTR_ERR(handle);
1720 if (!handle)
1721 return -EINVAL;
1722
1723 info = handle_to_ti_sci_info(handle);
1724
1725 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1726 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1727 (u32 *)&req, sizeof(req), sizeof(*resp));
1728 if (IS_ERR(xfer)) {
1729 ret = PTR_ERR(xfer);
Lokesh Vutla032dce82019-03-08 11:47:32 +05301730 return ret;
1731 }
1732
1733 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001734 if (ret)
Lokesh Vutla032dce82019-03-08 11:47:32 +05301735 return ret;
Lokesh Vutla032dce82019-03-08 11:47:32 +05301736
1737 resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1738
Lokesh Vutla032dce82019-03-08 11:47:32 +05301739 *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1740 resp->msmc_start_low;
1741 *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1742 resp->msmc_end_low;
1743
1744 return ret;
1745}
1746
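/*
 * Usage sketch (illustrative only, not part of this driver): reading back
 * the MSMC region currently left available by system firmware.
 *
 *	u64 msmc_start, msmc_end;
 *
 *	if (!handle->ops.core_ops.query_msmc(handle, &msmc_start, &msmc_end))
 *		printf("MSMC: 0x%llx - 0x%llx\n", msmc_start, msmc_end);
 */
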
1747/**
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301748 * ti_sci_cmd_proc_request() - Command to request control of a physical processor
1749 * @handle: Pointer to TI SCI handle
1750 * @proc_id: Processor ID this request is for
1751 *
1752 * Return: 0 if all went well, else returns appropriate error value.
1753 */
1754static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1755 u8 proc_id)
1756{
1757 struct ti_sci_msg_req_proc_request req;
1758 struct ti_sci_msg_hdr *resp;
1759 struct ti_sci_info *info;
1760 struct ti_sci_xfer *xfer;
1761 int ret = 0;
1762
1763 if (IS_ERR(handle))
1764 return PTR_ERR(handle);
1765 if (!handle)
1766 return -EINVAL;
1767
1768 info = handle_to_ti_sci_info(handle);
1769
1770 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1771 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1772 (u32 *)&req, sizeof(req), sizeof(*resp));
1773 if (IS_ERR(xfer)) {
1774 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301775 return ret;
1776 }
1777 req.processor_id = proc_id;
1778
1779 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001780 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301781 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301782
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301783 return ret;
1784}
1785
1786/**
1787 * ti_sci_cmd_proc_release() - Command to release control of a physical processor
1788 * @handle: Pointer to TI SCI handle
1789 * @proc_id: Processor ID this request is for
1790 *
1791 * Return: 0 if all went well, else returns appropriate error value.
1792 */
1793static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1794 u8 proc_id)
1795{
1796 struct ti_sci_msg_req_proc_release req;
1797 struct ti_sci_msg_hdr *resp;
1798 struct ti_sci_info *info;
1799 struct ti_sci_xfer *xfer;
1800 int ret = 0;
1801
1802 if (IS_ERR(handle))
1803 return PTR_ERR(handle);
1804 if (!handle)
1805 return -EINVAL;
1806
1807 info = handle_to_ti_sci_info(handle);
1808
1809 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1810 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1811 (u32 *)&req, sizeof(req), sizeof(*resp));
1812 if (IS_ERR(xfer)) {
1813 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301814 return ret;
1815 }
1816 req.processor_id = proc_id;
1817
1818 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001819 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301820 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301821
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301822 return ret;
1823}
1824
1825/**
1826 * ti_sci_cmd_proc_handover() - Command to hand over control of a physical
1827 * processor to a host in the processor's
1828 * access control list.
1829 * @handle: Pointer to TI SCI handle
1830 * @proc_id: Processor ID this request is for
1831 * @host_id: Host ID to get the control of the processor
1832 *
1833 * Return: 0 if all went well, else returns appropriate error value.
1834 */
1835static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1836 u8 proc_id, u8 host_id)
1837{
1838 struct ti_sci_msg_req_proc_handover req;
1839 struct ti_sci_msg_hdr *resp;
1840 struct ti_sci_info *info;
1841 struct ti_sci_xfer *xfer;
1842 int ret = 0;
1843
1844 if (IS_ERR(handle))
1845 return PTR_ERR(handle);
1846 if (!handle)
1847 return -EINVAL;
1848
1849 info = handle_to_ti_sci_info(handle);
1850
1851 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1852 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1853 (u32 *)&req, sizeof(req), sizeof(*resp));
1854 if (IS_ERR(xfer)) {
1855 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301856 return ret;
1857 }
1858 req.processor_id = proc_id;
1859 req.host_id = host_id;
1860
1861 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001862 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301863 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301864
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301865 return ret;
1866}
1867
1868/**
1869 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1870 * configuration flags
1871 * @handle: Pointer to TI SCI handle
1872 * @proc_id: Processor ID this request is for
 * @bootvector: Boot vector (entry address) for the processor
1873 * @config_flags_set: Configuration flags to be set
1874 * @config_flags_clear: Configuration flags to be cleared.
1875 *
1876 * Return: 0 if all went well, else returns appropriate error value.
1877 */
1878static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1879 u8 proc_id, u64 bootvector,
1880 u32 config_flags_set,
1881 u32 config_flags_clear)
1882{
1883 struct ti_sci_msg_req_set_proc_boot_config req;
1884 struct ti_sci_msg_hdr *resp;
1885 struct ti_sci_info *info;
1886 struct ti_sci_xfer *xfer;
1887 int ret = 0;
1888
1889 if (IS_ERR(handle))
1890 return PTR_ERR(handle);
1891 if (!handle)
1892 return -EINVAL;
1893
1894 info = handle_to_ti_sci_info(handle);
1895
1896 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1897 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1898 (u32 *)&req, sizeof(req), sizeof(*resp));
1899 if (IS_ERR(xfer)) {
1900 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301901 return ret;
1902 }
1903 req.processor_id = proc_id;
1904 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1905 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1906 TISCI_ADDR_HIGH_SHIFT;
1907 req.config_flags_set = config_flags_set;
1908 req.config_flags_clear = config_flags_clear;
1909
1910 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001911 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301912 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301913
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301914 return ret;
1915}
1916
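/*
 * Usage sketch (illustrative only, not part of this driver): a loader that
 * has already requested a processor can program its boot vector and
 * configuration flags. 'proc_id', 'entry_point' and the flag values are
 * hypothetical.
 *
 *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
 *
 *	if (!pops->proc_request(handle, proc_id))
 *		pops->set_proc_boot_cfg(handle, proc_id, entry_point,
 *					cfg_set, cfg_clear);
 */
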
1917/**
1918 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1919 * control flags
1920 * @handle: Pointer to TI SCI handle
1921 * @proc_id: Processor ID this request is for
1922 * @control_flags_set: Control flags to be set
1923 * @control_flags_clear: Control flags to be cleared
1924 *
1925 * Return: 0 if all went well, else returns appropriate error value.
1926 */
1927static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
1928 u8 proc_id, u32 control_flags_set,
1929 u32 control_flags_clear)
1930{
1931 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1932 struct ti_sci_msg_hdr *resp;
1933 struct ti_sci_info *info;
1934 struct ti_sci_xfer *xfer;
1935 int ret = 0;
1936
1937 if (IS_ERR(handle))
1938 return PTR_ERR(handle);
1939 if (!handle)
1940 return -EINVAL;
1941
1942 info = handle_to_ti_sci_info(handle);
1943
1944 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
1945 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1946 (u32 *)&req, sizeof(req), sizeof(*resp));
1947 if (IS_ERR(xfer)) {
1948 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301949 return ret;
1950 }
1951 req.processor_id = proc_id;
1952 req.control_flags_set = control_flags_set;
1953 req.control_flags_clear = control_flags_clear;
1954
1955 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001956 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301957 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301958
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301959 return ret;
1960}
1961
1962/**
1963 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
1964 * image and then set the processor configuration flags.
1965 * @handle: Pointer to TI SCI handle
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001966 * @image_addr: Memory address at which payload image and certificate is
1967 * located in memory, this is updated if the image data is
1968 * moved during authentication.
1969 * @image_size: This is updated with the final size of the image after
1970 * authentication.
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301971 *
1972 * Return: 0 if all went well, else returns appropriate error value.
1973 */
1974static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001975 u64 *image_addr, u32 *image_size)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301976{
1977 struct ti_sci_msg_req_proc_auth_boot_image req;
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001978 struct ti_sci_msg_resp_proc_auth_boot_image *resp;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301979 struct ti_sci_info *info;
1980 struct ti_sci_xfer *xfer;
1981 int ret = 0;
1982
1983 if (IS_ERR(handle))
1984 return PTR_ERR(handle);
1985 if (!handle)
1986 return -EINVAL;
1987
1988 info = handle_to_ti_sci_info(handle);
1989
Jorge Ramirez-Ortizb0373282023-01-10 18:29:48 +01001990 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMAGE,
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301991 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1992 (u32 *)&req, sizeof(req), sizeof(*resp));
1993 if (IS_ERR(xfer)) {
1994 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301995 return ret;
1996 }
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001997 req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
1998 req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301999 TISCI_ADDR_HIGH_SHIFT;
2000
2001 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002002 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302003 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302004
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04002005 resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302006
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04002007 *image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
2008 (((u64)resp->image_addr_high <<
2009 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2010 *image_size = resp->image_size;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302011
2012 return ret;
2013}
2014
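/*
 * Usage sketch (illustrative only, not part of this driver): authenticating
 * a certificate + image blob already placed in memory. Both address and
 * size are passed by reference and may be rewritten by system firmware.
 * 'image_buf' and 'image_len' are hypothetical.
 *
 *	u64 addr = (u64)(uintptr_t)image_buf;
 *	u32 size = image_len;
 *
 *	if (!handle->ops.proc_ops.proc_auth_boot_image(handle, &addr, &size))
 *		printf("image accepted at 0x%llx, %u bytes\n", addr, size);
 */
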
2015/**
2016 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
2017 * @handle: Pointer to TI SCI handle
2018 * @proc_id: Processor ID this request is for
2019 *
2020 * Return: 0 if all went well, else returns appropriate error value.
2021 */
2022static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
2023 u8 proc_id, u64 *bv, u32 *cfg_flags,
2024 u32 *ctrl_flags, u32 *sts_flags)
2025{
2026 struct ti_sci_msg_resp_get_proc_boot_status *resp;
2027 struct ti_sci_msg_req_get_proc_boot_status req;
2028 struct ti_sci_info *info;
2029 struct ti_sci_xfer *xfer;
2030 int ret = 0;
2031
2032 if (IS_ERR(handle))
2033 return PTR_ERR(handle);
2034 if (!handle)
2035 return -EINVAL;
2036
2037 info = handle_to_ti_sci_info(handle);
2038
2039 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
2040 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2041 (u32 *)&req, sizeof(req), sizeof(*resp));
2042 if (IS_ERR(xfer)) {
2043 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302044 return ret;
2045 }
2046 req.processor_id = proc_id;
2047
2048 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002049 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302050 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302051
2052 resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
2053 xfer->tx_message.buf;
2054
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302055 *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
2056 (((u64)resp->bootvector_high <<
2057 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2058 *cfg_flags = resp->config_flags;
2059 *ctrl_flags = resp->control_flags;
2060 *sts_flags = resp->status_flags;
2061
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302062 return ret;
2063}
2064
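/*
 * Usage sketch (illustrative only, not part of this driver): polling the
 * boot state of a processor after loading it. 'proc_id' is a hypothetical
 * placeholder.
 *
 *	u64 bv;
 *	u32 cfg, ctrl, sts;
 *
 *	if (!handle->ops.proc_ops.get_proc_boot_status(handle, proc_id, &bv,
 *						       &cfg, &ctrl, &sts))
 *		printf("bootvector 0x%llx status 0x%x\n", bv, sts);
 */
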
2065/**
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302066 * ti_sci_proc_wait_boot_status_no_wait() - Helper function to wait for a
2067 * processor boot status without requesting or
2068 * waiting for a response.
 * @handle: Pointer to TI SCI handle
2069 * @proc_id: Processor ID this request is for
2070 * @num_wait_iterations: Total number of iterations we will check before
2071 * we will timeout and give up
2072 * @num_match_iterations: How many iterations should we have continued
2073 * status to account for status bits glitching.
2074 * This is to make sure that match occurs for
2075 * consecutive checks. This implies that the
2076 * worst case should consider that the stable
2077 * time should at the worst be num_wait_iterations *
2078 * num_match_iterations to prevent timeout.
2079 * @delay_per_iteration_us: Specifies how long to wait (in micro seconds)
2080 * between each status checks. This is the minimum
2081 * duration, and overhead of register reads and
2082 * checks are on top of this and can vary based on
2083 * varied conditions.
2084 * @delay_before_iterations_us: Specifies how long to wait (in micro seconds)
2085 * before the very first check in the first
2086 * iteration of status check loop. This is the
2087 * minimum duration, and overhead of register
2088 * reads and checks are on top of this.
2089 * @status_flags_1_set_all_wait:If non-zero, Specifies that all bits of the
2090 * status matching this field requested MUST be 1.
2091 * @status_flags_1_set_any_wait:If non-zero, Specifies that at least one of the
2092 * bits matching this field requested MUST be 1.
2093 * @status_flags_1_clr_all_wait:If non-zero, Specifies that all bits of the
2094 * status matching this field requested MUST be 0.
2095 * @status_flags_1_clr_any_wait:If non-zero, Specifies that at least one of the
2096 * bits matching this field requested MUST be 0.
2097 *
2098 * Return: 0 if all goes well, else appropriate error message
2099 */
2100static int
2101ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle *handle,
2102 u8 proc_id,
2103 u8 num_wait_iterations,
2104 u8 num_match_iterations,
2105 u8 delay_per_iteration_us,
2106 u8 delay_before_iterations_us,
2107 u32 status_flags_1_set_all_wait,
2108 u32 status_flags_1_set_any_wait,
2109 u32 status_flags_1_clr_all_wait,
2110 u32 status_flags_1_clr_any_wait)
2111{
2112 struct ti_sci_msg_req_wait_proc_boot_status req;
2113 struct ti_sci_info *info;
2114 struct ti_sci_xfer *xfer;
2115 int ret = 0;
2116
2117 if (IS_ERR(handle))
2118 return PTR_ERR(handle);
2119 if (!handle)
2120 return -EINVAL;
2121
2122 info = handle_to_ti_sci_info(handle);
2123
2124 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_WAIT_PROC_BOOT_STATUS,
2125 TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
2126 (u32 *)&req, sizeof(req), 0);
2127 if (IS_ERR(xfer)) {
2128 ret = PTR_ERR(xfer);
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302129 return ret;
2130 }
2131 req.processor_id = proc_id;
2132 req.num_wait_iterations = num_wait_iterations;
2133 req.num_match_iterations = num_match_iterations;
2134 req.delay_per_iteration_us = delay_per_iteration_us;
2135 req.delay_before_iterations_us = delay_before_iterations_us;
2136 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
2137 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
2138 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
2139 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
2140
2141 ret = ti_sci_do_xfer(info, xfer);
2142 if (ret)
Andrew Davis771a16f2022-07-25 20:25:03 -05002143 return ret;
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302144
2145 return ret;
2146}
2147
2148/**
2149 * ti_sci_cmd_proc_shutdown_no_wait() - Command to shutdown a core without
2150 * requesting or waiting for a response. Note that this API call
2151 * should be followed by placing the respective processor into
2152 * either WFE or WFI mode.
2153 * @handle: Pointer to TI SCI handle
2154 * @proc_id: Processor ID this request is for
2155 *
2156 * Return: 0 if all went well, else returns appropriate error value.
2157 */
2158static int ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle *handle,
2159 u8 proc_id)
2160{
2161 int ret;
Sean Anderson405dc242020-09-15 10:44:38 -04002162 struct ti_sci_info *info;
2163
2164 if (IS_ERR(handle))
2165 return PTR_ERR(handle);
2166 if (!handle)
2167 return -EINVAL;
2168
2169 info = handle_to_ti_sci_info(handle);
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302170
2171 /*
2172 * Send the core boot status wait message waiting for either WFE or
2173 * WFI without requesting or waiting for a TISCI response with the
2174 * maximum wait time to give us the best chance to get to the WFE/WFI
2175 * command that should follow the invocation of this API before the
2176 * DMSC-internal processing of this command times out. Note that
2177 * waiting for the R5 WFE/WFI flags will also work on an ARMV8 type
2178 * core as the related flag bit positions are the same.
2179 */
2180 ret = ti_sci_proc_wait_boot_status_no_wait(handle, proc_id,
2181 U8_MAX, 100, U8_MAX, U8_MAX,
2182 0, PROC_BOOT_STATUS_FLAG_R5_WFE | PROC_BOOT_STATUS_FLAG_R5_WFI,
2183 0, 0);
2184 if (ret) {
2185 dev_err(info->dev, "Sending core %u wait message fail %d\n",
2186 proc_id, ret);
2187 return ret;
2188 }
2189
2190 /*
2191 * Release a processor managed by TISCI without requesting or waiting
2192 * for a response.
2193 */
2194 ret = ti_sci_set_device_state_no_wait(handle, proc_id, 0,
2195 MSG_DEVICE_SW_STATE_AUTO_OFF);
2196 if (ret)
2197 dev_err(info->dev, "Sending core %u shutdown message fail %d\n",
2198 proc_id, ret);
2199
2200 return ret;
2201}
2202
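/*
 * Usage sketch (illustrative only, not part of this driver): the call
 * pattern described above for shutting down the core we are currently
 * running on is to send the request and then drop straight into WFE (or
 * WFI). 'proc_id' is a hypothetical placeholder.
 *
 *	handle->ops.proc_ops.proc_shutdown_no_wait(handle, proc_id);
 *	asm volatile("wfe");
 */
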
2203/**
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302204 * ti_sci_cmd_ring_config() - configure RA ring
2205 * @handle: pointer to TI SCI handle
2206 * @valid_params: Bitfield defining validity of ring configuration parameters.
2207 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2208 * @index: Ring index.
2209 * @addr_lo: The ring base address lo 32 bits
2210 * @addr_hi: The ring base address hi 32 bits
2211 * @count: Number of ring elements.
2212 * @mode: The mode of the ring
2213 * @size: The ring element size.
2214 * @order_id: Specifies the ring's bus order ID.
2215 *
2216 * Return: 0 if all went well, else returns appropriate error value.
2217 *
2218 * See @ti_sci_msg_rm_ring_cfg_req for more info.
2219 */
2220static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2221 u32 valid_params, u16 nav_id, u16 index,
2222 u32 addr_lo, u32 addr_hi, u32 count,
2223 u8 mode, u8 size, u8 order_id)
2224{
2225 struct ti_sci_msg_rm_ring_cfg_resp *resp;
2226 struct ti_sci_msg_rm_ring_cfg_req req;
2227 struct ti_sci_xfer *xfer;
2228 struct ti_sci_info *info;
2229 int ret = 0;
2230
2231 if (IS_ERR(handle))
2232 return PTR_ERR(handle);
2233 if (!handle)
2234 return -EINVAL;
2235
2236 info = handle_to_ti_sci_info(handle);
2237
2238 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2239 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2240 (u32 *)&req, sizeof(req), sizeof(*resp));
2241 if (IS_ERR(xfer)) {
2242 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302243 return ret;
2244 }
2245 req.valid_params = valid_params;
2246 req.nav_id = nav_id;
2247 req.index = index;
2248 req.addr_lo = addr_lo;
2249 req.addr_hi = addr_hi;
2250 req.count = count;
2251 req.mode = mode;
2252 req.size = size;
2253 req.order_id = order_id;
2254
2255 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002256 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302257 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302258
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302259fail:
2260 dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2261 return ret;
2262}
2263
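/*
 * Usage sketch (illustrative only, not part of this driver): configuring a
 * ring through rm_ring_ops. All identifiers and sizes below are
 * hypothetical; valid_params selects which fields firmware should apply.
 *
 *	ret = handle->ops.rm_ring_ops.config(handle, valid_params, nav_id,
 *					     ring_idx, lower_32_bits(ring_dma),
 *					     upper_32_bits(ring_dma), ring_cnt,
 *					     ring_mode, elem_size, order_id);
 */
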
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302264static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2265 u32 nav_id, u32 src_thread, u32 dst_thread)
2266{
2267 struct ti_sci_msg_hdr *resp;
2268 struct ti_sci_msg_psil_pair req;
2269 struct ti_sci_xfer *xfer;
2270 struct ti_sci_info *info;
2271 int ret = 0;
2272
2273 if (IS_ERR(handle))
2274 return PTR_ERR(handle);
2275 if (!handle)
2276 return -EINVAL;
2277
2278 info = handle_to_ti_sci_info(handle);
2279
2280 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2281 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2282 (u32 *)&req, sizeof(req), sizeof(*resp));
2283 if (IS_ERR(xfer)) {
2284 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302285 return ret;
2286 }
2287 req.nav_id = nav_id;
2288 req.src_thread = src_thread;
2289 req.dst_thread = dst_thread;
2290
2291 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002292 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302293 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302294
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302295fail:
2296 	dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%d\n",
2297 nav_id, src_thread, dst_thread, ret);
2298 return ret;
2299}
2300
2301static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2302 u32 nav_id, u32 src_thread, u32 dst_thread)
2303{
2304 struct ti_sci_msg_hdr *resp;
2305 struct ti_sci_msg_psil_unpair req;
2306 struct ti_sci_xfer *xfer;
2307 struct ti_sci_info *info;
2308 int ret = 0;
2309
2310 if (IS_ERR(handle))
2311 return PTR_ERR(handle);
2312 if (!handle)
2313 return -EINVAL;
2314
2315 info = handle_to_ti_sci_info(handle);
2316
2317 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2318 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2319 (u32 *)&req, sizeof(req), sizeof(*resp));
2320 if (IS_ERR(xfer)) {
2321 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302322 return ret;
2323 }
2324 req.nav_id = nav_id;
2325 req.src_thread = src_thread;
2326 req.dst_thread = dst_thread;
2327
2328 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002329 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302330 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302331
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302332fail:
2333 	dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%d\n",
2334 src_thread, dst_thread, ret);
2335 return ret;
2336}
2337
2338static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2339 const struct ti_sci_handle *handle,
2340 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2341{
2342 struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2343 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2344 struct ti_sci_xfer *xfer;
2345 struct ti_sci_info *info;
2346 int ret = 0;
2347
2348 if (IS_ERR(handle))
2349 return PTR_ERR(handle);
2350 if (!handle)
2351 return -EINVAL;
2352
2353 info = handle_to_ti_sci_info(handle);
2354
2355 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2356 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2357 (u32 *)&req, sizeof(req), sizeof(*resp));
2358 if (IS_ERR(xfer)) {
2359 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302360 return ret;
2361 }
2362 req.valid_params = params->valid_params;
2363 req.nav_id = params->nav_id;
2364 req.index = params->index;
2365 req.tx_pause_on_err = params->tx_pause_on_err;
2366 req.tx_filt_einfo = params->tx_filt_einfo;
2367 req.tx_filt_pswords = params->tx_filt_pswords;
2368 req.tx_atype = params->tx_atype;
2369 req.tx_chan_type = params->tx_chan_type;
2370 req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2371 req.tx_fetch_size = params->tx_fetch_size;
2372 req.tx_credit_count = params->tx_credit_count;
2373 req.txcq_qnum = params->txcq_qnum;
2374 req.tx_priority = params->tx_priority;
2375 req.tx_qos = params->tx_qos;
2376 req.tx_orderid = params->tx_orderid;
2377 req.fdepth = params->fdepth;
2378 req.tx_sched_priority = params->tx_sched_priority;
Vignesh Raghavendraa8a2b8a2021-05-10 20:06:02 +05302379 req.tx_burst_size = params->tx_burst_size;
2380 req.tx_tdtype = params->tx_tdtype;
2381 req.extended_ch_type = params->extended_ch_type;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302382
2383 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002384 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302385 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302386
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302387fail:
2388 	dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2389 return ret;
2390}
2391
2392static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
2393 const struct ti_sci_handle *handle,
2394 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2395{
2396 struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
2397 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
2398 struct ti_sci_xfer *xfer;
2399 struct ti_sci_info *info;
2400 int ret = 0;
2401
2402 if (IS_ERR(handle))
2403 return PTR_ERR(handle);
2404 if (!handle)
2405 return -EINVAL;
2406
2407 info = handle_to_ti_sci_info(handle);
2408
2409 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2410 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2411 (u32 *)&req, sizeof(req), sizeof(*resp));
2412 if (IS_ERR(xfer)) {
2413 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302414 return ret;
2415 }
2416
2417 req.valid_params = params->valid_params;
2418 req.nav_id = params->nav_id;
2419 req.index = params->index;
2420 req.rx_fetch_size = params->rx_fetch_size;
2421 req.rxcq_qnum = params->rxcq_qnum;
2422 req.rx_priority = params->rx_priority;
2423 req.rx_qos = params->rx_qos;
2424 req.rx_orderid = params->rx_orderid;
2425 req.rx_sched_priority = params->rx_sched_priority;
2426 req.flowid_start = params->flowid_start;
2427 req.flowid_cnt = params->flowid_cnt;
2428 req.rx_pause_on_err = params->rx_pause_on_err;
2429 req.rx_atype = params->rx_atype;
2430 req.rx_chan_type = params->rx_chan_type;
2431 req.rx_ignore_short = params->rx_ignore_short;
2432 req.rx_ignore_long = params->rx_ignore_long;
2433
2434 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002435 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302436 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302437
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302438fail:
2439 dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2440 return ret;
2441}
2442
2443static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2444 const struct ti_sci_handle *handle,
2445 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2446{
2447 struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2448 struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2449 struct ti_sci_xfer *xfer;
2450 struct ti_sci_info *info;
2451 int ret = 0;
2452
2453 if (IS_ERR(handle))
2454 return PTR_ERR(handle);
2455 if (!handle)
2456 return -EINVAL;
2457
2458 info = handle_to_ti_sci_info(handle);
2459
2460 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2461 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2462 (u32 *)&req, sizeof(req), sizeof(*resp));
2463 if (IS_ERR(xfer)) {
2464 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302465 return ret;
2466 }
2467
2468 req.valid_params = params->valid_params;
2469 req.nav_id = params->nav_id;
2470 req.flow_index = params->flow_index;
2471 req.rx_einfo_present = params->rx_einfo_present;
2472 req.rx_psinfo_present = params->rx_psinfo_present;
2473 req.rx_error_handling = params->rx_error_handling;
2474 req.rx_desc_type = params->rx_desc_type;
2475 req.rx_sop_offset = params->rx_sop_offset;
2476 req.rx_dest_qnum = params->rx_dest_qnum;
2477 req.rx_src_tag_hi = params->rx_src_tag_hi;
2478 req.rx_src_tag_lo = params->rx_src_tag_lo;
2479 req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2480 req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2481 req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2482 req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2483 req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2484 req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2485 req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2486 req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2487 req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2488 req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2489 req.rx_ps_location = params->rx_ps_location;
2490
2491 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002492 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302493 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302494
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302495fail:
2496 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302497 return ret;
2498}
2499
Kishon Vijay Abraham Ie9876c82024-08-26 15:55:06 +05302500static int ti_sci_cmd_rm_udmap_rx_flow_cfg_noop(const struct ti_sci_handle *handle,
2501 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2502{
2503 return 0;
2504}
2505
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002506/**
2507 * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
2508 * @handle: pointer to TI SCI handle
2509 * @region: region configuration parameters
2510 *
2511 * Return: 0 if all went well, else returns appropriate error value.
2512 */
2513static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
2514 const struct ti_sci_msg_fwl_region *region)
2515{
2516 struct ti_sci_msg_fwl_set_firewall_region_req req;
2517 struct ti_sci_msg_hdr *resp;
2518 struct ti_sci_info *info;
2519 struct ti_sci_xfer *xfer;
2520 int ret = 0;
2521
2522 if (IS_ERR(handle))
2523 return PTR_ERR(handle);
2524 if (!handle)
2525 return -EINVAL;
2526
2527 info = handle_to_ti_sci_info(handle);
2528
2529 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
2530 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2531 (u32 *)&req, sizeof(req), sizeof(*resp));
2532 if (IS_ERR(xfer)) {
2533 ret = PTR_ERR(xfer);
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002534 return ret;
2535 }
2536
2537 req.fwl_id = region->fwl_id;
2538 req.region = region->region;
2539 req.n_permission_regs = region->n_permission_regs;
2540 req.control = region->control;
2541 req.permissions[0] = region->permissions[0];
2542 req.permissions[1] = region->permissions[1];
2543 req.permissions[2] = region->permissions[2];
2544 req.start_address = region->start_address;
2545 req.end_address = region->end_address;
2546
2547 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002548 if (ret)
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002549 return ret;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002550
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002551 return 0;
2552}
2553
2554/**
2555 * ti_sci_cmd_get_fwl_region() - Request for getting a firewall region
2556 * @handle: pointer to TI SCI handle
2557 * @region: region configuration parameters
2558 *
2559 * Return: 0 if all went well, else returns appropriate error value.
2560 */
2561static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
2562 struct ti_sci_msg_fwl_region *region)
2563{
2564 struct ti_sci_msg_fwl_get_firewall_region_req req;
2565 struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
2566 struct ti_sci_info *info;
2567 struct ti_sci_xfer *xfer;
2568 int ret = 0;
2569
2570 if (IS_ERR(handle))
2571 return PTR_ERR(handle);
2572 if (!handle)
2573 return -EINVAL;
2574
2575 info = handle_to_ti_sci_info(handle);
2576
2577 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
2578 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2579 (u32 *)&req, sizeof(req), sizeof(*resp));
2580 if (IS_ERR(xfer)) {
2581 ret = PTR_ERR(xfer);
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002582 return ret;
2583 }
2584
2585 req.fwl_id = region->fwl_id;
2586 req.region = region->region;
2587 req.n_permission_regs = region->n_permission_regs;
2588
2589 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002590 if (ret)
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002591 return ret;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002592
2593 resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;
2594
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002595 region->fwl_id = resp->fwl_id;
2596 region->region = resp->region;
2597 region->n_permission_regs = resp->n_permission_regs;
2598 region->control = resp->control;
2599 region->permissions[0] = resp->permissions[0];
2600 region->permissions[1] = resp->permissions[1];
2601 region->permissions[2] = resp->permissions[2];
2602 region->start_address = resp->start_address;
2603 region->end_address = resp->end_address;
2604
2605 return 0;
2606}
2607
2608/**
2609 * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
2610 * @handle: pointer to TI SCI handle
2611 * @owner: firewall owner configuration parameters
2612 *
2613 * Return: 0 if all went well, else returns appropriate error value.
2614 */
2615static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
2616 struct ti_sci_msg_fwl_owner *owner)
2617{
2618 struct ti_sci_msg_fwl_change_owner_info_req req;
2619 struct ti_sci_msg_fwl_change_owner_info_resp *resp;
2620 struct ti_sci_info *info;
2621 struct ti_sci_xfer *xfer;
2622 int ret = 0;
2623
2624 if (IS_ERR(handle))
2625 return PTR_ERR(handle);
2626 if (!handle)
2627 return -EINVAL;
2628
2629 info = handle_to_ti_sci_info(handle);
2630
Andrew F. Davis8928fbd2019-04-29 09:04:11 -04002631 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
2632 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002633 (u32 *)&req, sizeof(req), sizeof(*resp));
2634 if (IS_ERR(xfer)) {
2635 ret = PTR_ERR(xfer);
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002636 return ret;
2637 }
2638
2639 req.fwl_id = owner->fwl_id;
2640 req.region = owner->region;
2641 req.owner_index = owner->owner_index;
2642
2643 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002644 if (ret)
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002645 return ret;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002646
2647 resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;
2648
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002649 owner->fwl_id = resp->fwl_id;
2650 owner->region = resp->region;
2651 owner->owner_index = resp->owner_index;
2652 owner->owner_privid = resp->owner_privid;
2653 owner->owner_permission_bits = resp->owner_permission_bits;
2654
2655 return ret;
2656}
2657
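/*
 * Usage sketch (illustrative only, not part of this driver): reading a
 * firewall region back through fwl_ops. 'fwl_id' and 'region_idx' are
 * hypothetical placeholders.
 *
 *	struct ti_sci_msg_fwl_region region = {
 *		.fwl_id = fwl_id,
 *		.region = region_idx,
 *		.n_permission_regs = 3,
 *	};
 *
 *	if (!handle->ops.fwl_ops.get_fwl_region(handle, &region))
 *		printf("fwl %u region %u control 0x%x\n",
 *		       region.fwl_id, region.region, region.control);
 */
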
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302658/*
2659 * ti_sci_setup_ops() - Setup the operations structures
2660 * @info: pointer to TISCI info structure
2661 */
2662static void ti_sci_setup_ops(struct ti_sci_info *info)
2663{
2664 struct ti_sci_ops *ops = &info->handle.ops;
2665 struct ti_sci_board_ops *bops = &ops->board_ops;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302666 struct ti_sci_dev_ops *dops = &ops->dev_ops;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05302667 struct ti_sci_clk_ops *cops = &ops->clk_ops;
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05302668 struct ti_sci_core_ops *core_ops = &ops->core_ops;
Moteen Shah3a66db62025-06-09 13:44:31 +05302669 struct ti_sci_firmware_ops *fw_ops = &ops->fw_ops;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302670 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302671 struct ti_sci_proc_ops *pops = &ops->proc_ops;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302672 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2673 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2674 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002675 struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302676
2677 bops->board_config = ti_sci_cmd_set_board_config;
2678 bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
2679 bops->board_config_security = ti_sci_cmd_set_board_config_security;
2680 bops->board_config_pm = ti_sci_cmd_set_board_config_pm;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302681
2682 dops->get_device = ti_sci_cmd_get_device;
Lokesh Vutlaf5613002019-06-07 19:24:39 +05302683 dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302684 dops->idle_device = ti_sci_cmd_idle_device;
Lokesh Vutlaf5613002019-06-07 19:24:39 +05302685 dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302686 dops->put_device = ti_sci_cmd_put_device;
2687 dops->is_valid = ti_sci_cmd_dev_is_valid;
2688 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2689 dops->is_idle = ti_sci_cmd_dev_is_idle;
2690 dops->is_stop = ti_sci_cmd_dev_is_stop;
2691 dops->is_on = ti_sci_cmd_dev_is_on;
2692 dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2693 dops->set_device_resets = ti_sci_cmd_set_device_resets;
2694 dops->get_device_resets = ti_sci_cmd_get_device_resets;
Lokesh Vutla0d0412a2019-06-07 19:24:41 +05302695 dops->release_exclusive_devices = ti_sci_cmd_release_exclusive_devices;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05302696
2697 cops->get_clock = ti_sci_cmd_get_clock;
2698 cops->idle_clock = ti_sci_cmd_idle_clock;
2699 cops->put_clock = ti_sci_cmd_put_clock;
2700 cops->is_auto = ti_sci_cmd_clk_is_auto;
2701 cops->is_on = ti_sci_cmd_clk_is_on;
2702 cops->is_off = ti_sci_cmd_clk_is_off;
2703
2704 cops->set_parent = ti_sci_cmd_clk_set_parent;
2705 cops->get_parent = ti_sci_cmd_clk_get_parent;
2706 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2707
2708 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2709 cops->set_freq = ti_sci_cmd_clk_set_freq;
2710 cops->get_freq = ti_sci_cmd_clk_get_freq;
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05302711
2712 core_ops->reboot_device = ti_sci_cmd_core_reboot;
Lokesh Vutla032dce82019-03-08 11:47:32 +05302713 core_ops->query_msmc = ti_sci_cmd_query_msmc;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302714
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302715 rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2716 rm_core_ops->get_range_from_shost =
2717 ti_sci_cmd_get_resource_range_from_shost;
2718
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302719 pops->proc_request = ti_sci_cmd_proc_request;
2720 pops->proc_release = ti_sci_cmd_proc_release;
2721 pops->proc_handover = ti_sci_cmd_proc_handover;
2722 pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
2723 pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
2724 pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
2725 pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302726 pops->proc_shutdown_no_wait = ti_sci_cmd_proc_shutdown_no_wait;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302727
2728 rops->config = ti_sci_cmd_ring_config;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302729
2730 psilops->pair = ti_sci_cmd_rm_psil_pair;
2731 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2732
2733 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2734 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2735 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002736
2737 fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
2738 fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
2739 fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
Moteen Shah3a66db62025-06-09 13:44:31 +05302740
2741 fw_ops->query_dm_cap = ti_sci_cmd_query_dm_cap;
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302742}
2743
2744/**
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302745 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
2746 * @sci_dev: Pointer to the SYSFW device
2747 *
2748 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2749 * are encountered.
2750 */
2751const
2752struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2753{
Neha Malcom Francisc7dedd02023-09-27 18:39:53 +05302754 int ret;
2755
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302756 if (!sci_dev)
2757 return ERR_PTR(-EINVAL);
2758
2759 struct ti_sci_info *info = dev_get_priv(sci_dev);
2760
2761 if (!info)
2762 return ERR_PTR(-EINVAL);
2763
2764 struct ti_sci_handle *handle = &info->handle;
2765
2766 if (!handle)
2767 return ERR_PTR(-EINVAL);
2768
Neha Malcom Francisc7dedd02023-09-27 18:39:53 +05302769 ret = ti_sci_cmd_get_revision(handle);
2770
2771 if (ret)
2772 return ERR_PTR(-EINVAL);
2773
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302774 return handle;
2775}
2776
2777/**
2778 * ti_sci_get_handle() - Get the TI SCI handle for a device
2779 * @dev: Pointer to device for which we want SCI handle
2780 *
2781 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2782 * are encountered.
2783 */
2784const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2785{
2786 if (!dev)
2787 return ERR_PTR(-EINVAL);
2788
2789 struct udevice *sci_dev = dev_get_parent(dev);
2790
2791 return ti_sci_get_handle_from_sysfw(sci_dev);
2792}
2793
2794/**
2795 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2796 * @dev: device node
2797 * @property: property name containing phandle on TISCI node
2798 *
2799 * Return: pointer to handle if successful, else appropriate error value.
2800 */
2801const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2802 const char *property)
2803{
2804 struct ti_sci_info *entry, *info = NULL;
2805 	u32 phandle;
	int err;
2806 ofnode node;
2807
2808 err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2809 if (err)
2810 return ERR_PTR(err);
2811
2812 node = ofnode_get_by_phandle(phandle);
2813 if (!ofnode_valid(node))
2814 return ERR_PTR(-EINVAL);
2815
2816 list_for_each_entry(entry, &ti_sci_list, list)
2817 if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2818 info = entry;
2819 break;
2820 }
2821
2822 if (!info)
2823 return ERR_PTR(-ENODEV);
2824
2825 return &info->handle;
2826}
2827
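/*
 * Usage sketch (illustrative only, not part of this driver): a client whose
 * node carries a phandle to the TISCI node can look the handle up by
 * property name instead of relying on its parent. The "ti,sci" property
 * name is hypothetical.
 *
 *	const struct ti_sci_handle *sci;
 *
 *	sci = ti_sci_get_by_phandle(dev, "ti,sci");
 *	if (IS_ERR(sci))
 *		return PTR_ERR(sci);
 */
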
2828/**
2829 * ti_sci_of_to_info() - generate private data from device tree
2830 * @dev: corresponding system controller interface device
2831 * @info: pointer to driver specific private data
2832 *
2833 * Return: 0 if all goes good, else appropriate error message.
2834 */
2835static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
2836{
2837 int ret;
2838
2839 ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
2840 if (ret) {
2841 dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
2842 __func__, ret);
2843 return ret;
2844 }
2845
2846 ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
2847 if (ret) {
2848 dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
2849 __func__, ret);
2850 return ret;
2851 }
2852
2853 /* Notify channel is optional. Enable only if populated */
2854 ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
2855 if (ret) {
2856 dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
2857 __func__, ret);
2858 }
2859
2860 info->host_id = dev_read_u32_default(dev, "ti,host-id",
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302861 info->desc->default_host_id);
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302862
2863 info->is_secure = dev_read_bool(dev, "ti,secure-host");
2864
2865 return 0;
2866}
2867
2868/**
2869 * ti_sci_probe() - Basic probe
2870 * @dev: corresponding system controller interface device
2871 *
2872 * Return: 0 if all goes good, else appropriate error message.
2873 */
2874static int ti_sci_probe(struct udevice *dev)
2875{
2876 struct ti_sci_info *info;
2877 int ret;
2878
2879 debug("%s(dev=%p)\n", __func__, dev);
2880
2881 info = dev_get_priv(dev);
2882 info->desc = (void *)dev_get_driver_data(dev);
2883
2884 ret = ti_sci_of_to_info(dev, info);
2885 if (ret) {
2886 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2887 return ret;
2888 }
2889
2890 info->dev = dev;
2891 info->seq = 0xA;
2892
Udit Kumar24b11a42025-04-29 22:44:40 +05302893 INIT_LIST_HEAD(&info->dev_list);
2894
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302895 list_add_tail(&info->list, &ti_sci_list);
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302896 ti_sci_setup_ops(info);
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302897
Andrew Davis1ed20d62024-04-02 11:09:07 -05002898 if (IS_ENABLED(CONFIG_SYSRESET_TI_SCI)) {
2899 ret = device_bind_driver(dev, "ti-sci-sysreset", "sysreset", NULL);
2900 if (ret)
2901 dev_warn(dev, "cannot bind SYSRESET (ret = %d)\n", ret);
2902 }
2903
Neha Malcom Francisc7dedd02023-09-27 18:39:53 +05302904 return 0;
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302905}
2906
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05302907/**
2908 * ti_sci_dm_probe() - Basic probe for DM to TIFS SCI
2909 * @dev: corresponding system controller interface device
2910 *
2911 * Return: 0 if all goes good, else appropriate error message.
2912 */
2913static __maybe_unused int ti_sci_dm_probe(struct udevice *dev)
2914{
2915 struct ti_sci_rm_core_ops *rm_core_ops;
2916 struct ti_sci_rm_udmap_ops *udmap_ops;
2917 struct ti_sci_rm_ringacc_ops *rops;
2918 struct ti_sci_rm_psil_ops *psilops;
2919 struct ti_sci_ops *ops;
2920 struct ti_sci_info *info;
2921 int ret;
2922
2923 debug("%s(dev=%p)\n", __func__, dev);
2924
2925 info = dev_get_priv(dev);
2926 info->desc = (void *)dev_get_driver_data(dev);
2927
2928 ret = ti_sci_of_to_info(dev, info);
2929 if (ret) {
2930 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2931 return ret;
2932 }
2933
2934 info->dev = dev;
2935 info->seq = 0xA;
2936
Udit Kumar24b11a42025-04-29 22:44:40 +05302937 INIT_LIST_HEAD(&info->dev_list);
2938
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05302939 list_add_tail(&info->list, &ti_sci_list);
2940
2941 ops = &info->handle.ops;
2942
2943 rm_core_ops = &ops->rm_core_ops;
2944 rm_core_ops->get_range = ti_sci_cmd_get_resource_range_static;
2945
2946 rops = &ops->rm_ring_ops;
2947 rops->config = ti_sci_cmd_ring_config;
2948
2949 psilops = &ops->rm_psil_ops;
2950 psilops->pair = ti_sci_cmd_rm_psil_pair;
2951 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2952
2953 udmap_ops = &ops->rm_udmap_ops;
2954 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2955 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
Kishon Vijay Abraham Ie9876c82024-08-26 15:55:06 +05302956 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg_noop;
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05302957
2958 return ret;
2959}
2960
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302961/*
2962 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
2963 * @res: Pointer to the TISCI resource
2964 *
2965 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
2966 */
2967u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
2968{
2969 u16 set, free_bit;
2970
2971 for (set = 0; set < res->sets; set++) {
2972 free_bit = find_first_zero_bit(res->desc[set].res_map,
2973 res->desc[set].num);
2974 if (free_bit != res->desc[set].num) {
2975 set_bit(free_bit, res->desc[set].res_map);
2976 return res->desc[set].start + free_bit;
2977 }
2978 }
2979
2980 return TI_SCI_RESOURCE_NULL;
2981}
2982
2983/**
2984 * ti_sci_release_resource() - Release a resource from TISCI resource.
2985 * @res: Pointer to the TISCI resource
 * @id: Resource index to be released
2986 */
2987void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
2988{
2989 u16 set;
2990
2991 for (set = 0; set < res->sets; set++) {
2992 if (res->desc[set].start <= id &&
2993 (res->desc[set].num + res->desc[set].start) > id)
2994 clear_bit(id - res->desc[set].start,
2995 res->desc[set].res_map);
2996 }
2997}
2998
2999/**
3000 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
3001 * @handle: TISCI handle
3002 * @dev: Device pointer to which the resource is assigned
 * @dev_id: TISCI device ID of the resource
3003 * @of_prop: property name by which the resources are represented
3004 *
3005 * Note: This function expects of_prop to be a list of resource subtypes
3006 * (one u32 per set). Allocates and initializes a ti_sci_resource structure
3007 * covering all of them. Client drivers can directly call
3008 * ti_sci_(get_free, release)_resource apis for handling the resource.
3009 *
3010 * Return: Pointer to ti_sci_resource if all went well else appropriate
3011 * error pointer.
3012 */
3013struct ti_sci_resource *
3014devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
3015 struct udevice *dev, u32 dev_id, char *of_prop)
3016{
3017 u32 resource_subtype;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303018 struct ti_sci_resource *res;
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05003019 bool valid_set = false;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303020 int sets, i, ret;
3021 u32 *temp;
3022
3023 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
3024 if (!res)
3025 return ERR_PTR(-ENOMEM);
3026
3027 sets = dev_read_size(dev, of_prop);
3028 if (sets < 0) {
3029 dev_err(dev, "%s resource type ids not available\n", of_prop);
3030 return ERR_PTR(sets);
3031 }
3032 temp = malloc(sets);
3033 sets /= sizeof(u32);
3034 res->sets = sets;
3035
3036 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
3037 GFP_KERNEL);
3038 if (!res->desc)
3039 return ERR_PTR(-ENOMEM);
3040
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303041 ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
3042 if (ret)
3043 return ERR_PTR(-EINVAL);
3044
3045 for (i = 0; i < res->sets; i++) {
3046 resource_subtype = temp[i];
3047 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
3048 resource_subtype,
3049 &res->desc[i].start,
3050 &res->desc[i].num);
3051 if (ret) {
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05003052 dev_dbg(dev, "type %d subtype %d not allocated for host %d\n",
Lokesh Vutla0acf1dc2020-08-17 11:00:48 +05303053 dev_id, resource_subtype,
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303054 handle_to_ti_sci_info(handle)->host_id);
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05003055 res->desc[i].start = 0;
3056 res->desc[i].num = 0;
3057 continue;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303058 }
3059
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05003060 valid_set = true;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303061 dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
Lokesh Vutla0acf1dc2020-08-17 11:00:48 +05303062 dev_id, resource_subtype, res->desc[i].start,
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303063 res->desc[i].num);
3064
3065 res->desc[i].res_map =
3066 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
3067 sizeof(*res->desc[i].res_map), GFP_KERNEL);
3068 if (!res->desc[i].res_map)
3069 return ERR_PTR(-ENOMEM);
3070 }
3071
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05003072 if (valid_set)
3073 return res;
3074
3075 return ERR_PTR(-EINVAL);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303076}
3077
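/*
 * Usage sketch (illustrative only, not part of this driver): a DMA client
 * could parse a list of resource subtypes from its node and then allocate
 * and release individual indices. The property name and 'dev_id' are
 * hypothetical.
 *
 *	struct ti_sci_resource *res;
 *	u16 idx;
 *
 *	res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
 *					  "ti,sci-rm-range-rflow");
 *	if (!IS_ERR(res)) {
 *		idx = ti_sci_get_free_resource(res);
 *		if (idx != TI_SCI_RESOURCE_NULL)
 *			ti_sci_release_resource(res, idx);
 *	}
 */
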
3078/* Description for K2G */
3079static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3080 .default_host_id = 2,
3081 /* Conservative duration */
3082 .max_rx_timeout_ms = 10000,
3083 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3084 .max_msgs = 20,
3085 .max_msg_size = 64,
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303086};
3087
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303088/* Description for AM654 */
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303089static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3090 .default_host_id = 12,
3091 /* Conservative duration */
3092 .max_rx_timeout_ms = 10000,
3093 	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3094 .max_msgs = 20,
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303095 .max_msg_size = 60,
3096};
3097
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05303098/* Description for J721e DM to DMSC communication */
3099static const struct ti_sci_desc ti_sci_dm_j721e_desc = {
3100 .default_host_id = 3,
3101 .max_rx_timeout_ms = 10000,
3102 .max_msgs = 20,
3103 .max_msg_size = 60,
3104};
3105
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303106static const struct udevice_id ti_sci_ids[] = {
3107 {
3108 .compatible = "ti,k2g-sci",
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303109 .data = (ulong)&ti_sci_pmmc_k2g_desc
3110 },
3111 {
3112 .compatible = "ti,am654-sci",
3113 .data = (ulong)&ti_sci_pmmc_am654_desc
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303114 },
3115 { /* Sentinel */ },
3116};
3117
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05303118static __maybe_unused const struct udevice_id ti_sci_dm_ids[] = {
3119 {
3120 .compatible = "ti,j721e-dm-sci",
3121 .data = (ulong)&ti_sci_dm_j721e_desc
3122 },
3123 { /* Sentinel */ },
3124};
3125
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303126U_BOOT_DRIVER(ti_sci) = {
3127 .name = "ti_sci",
3128 .id = UCLASS_FIRMWARE,
3129 .of_match = ti_sci_ids,
3130 .probe = ti_sci_probe,
Simon Glass8a2b47f2020-12-03 16:55:17 -07003131 .priv_auto = sizeof(struct ti_sci_info),
Manorit Chawdhry27f161c2024-12-17 14:24:37 +05303132 .flags = DM_FLAG_PRE_RELOC,
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303133};
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05303134
3135#if IS_ENABLED(CONFIG_K3_DM_FW)
3136U_BOOT_DRIVER(ti_sci_dm) = {
3137 .name = "ti_sci_dm",
3138 .id = UCLASS_FIRMWARE,
3139 .of_match = ti_sci_dm_ids,
3140 .probe = ti_sci_dm_probe,
3141 .priv_auto = sizeof(struct ti_sci_info),
3142};
3143#endif