blob: 8ce0f46e70c8dd4ea4e3756c04f291a75854ef5f [file] [log] [blame]
Lokesh Vutla5af02db2018-08-27 15:57:32 +05301// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Texas Instruments System Control Interface Protocol Driver
4 * Based on drivers/firmware/ti_sci.c from Linux.
5 *
Nishanth Menoneaa39c62023-11-01 15:56:03 -05006 * Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/
Lokesh Vutla5af02db2018-08-27 15:57:32 +05307 * Lokesh Vutla <lokeshvutla@ti.com>
8 */
9
Lokesh Vutla5af02db2018-08-27 15:57:32 +053010#include <dm.h>
11#include <errno.h>
Simon Glass0f2af882020-05-10 11:40:05 -060012#include <log.h>
Lokesh Vutla5af02db2018-08-27 15:57:32 +053013#include <mailbox.h>
Simon Glass9bc15642020-02-03 07:36:16 -070014#include <malloc.h>
Lokesh Vutla5af02db2018-08-27 15:57:32 +053015#include <dm/device.h>
Simon Glass9bc15642020-02-03 07:36:16 -070016#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070017#include <dm/devres.h>
Andrew Davis1ed20d62024-04-02 11:09:07 -050018#include <dm/lists.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060019#include <linux/bitops.h>
Grygorii Strashkod64c5b22019-02-05 17:31:21 +053020#include <linux/compat.h>
Lokesh Vutla5af02db2018-08-27 15:57:32 +053021#include <linux/err.h>
22#include <linux/soc/ti/k3-sec-proxy.h>
23#include <linux/soc/ti/ti_sci_protocol.h>
24
25#include "ti_sci.h"
Vignesh Raghavendra4214a812021-06-07 19:47:48 +053026#include "ti_sci_static_data.h"
Lokesh Vutla5af02db2018-08-27 15:57:32 +053027
28/* List of all TI SCI devices active in system */
29static LIST_HEAD(ti_sci_list);
30
/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message: Transmit message (buffer + length handed to the secure proxy)
 * @rx_len:     Expected receive message length; 0 for send-only transfers
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	u8 rx_len;
};
40
/**
 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
 *				management representation of dev_ids.
 * @dev_id:	TISCI device ID
 * @type:	Corresponding id as identified by TISCI RM.
 *
 * Note: This is used only as a work around for using RM range apis
 *	for AM654 SoC. For future SoCs dev_id will be used as type
 *	for RM range APIs. In order to maintain ABI backward compatibility
 *	type is not being changed for AM654 SoC.
 */
struct ti_sci_rm_type_map {
	u32 dev_id;
	u16 type;
};
56
/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};
71
/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @handle:	Instance of TI SCI handle to send to clients.
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @chan_notify: Notification mailbox channel
 * @xfer:	xfer info
 * @list:	list head
 * @dev_list:	List of exclusively-requested devices (struct
 *		ti_sci_exclusive_dev entries)
 * @is_secure:	Determines if the communication is through secure threads.
 * @host_id:	Host identifier representing the compute entity
 * @seq:	Seq id used for verification for tx and rx message.
 */
struct ti_sci_info {
	struct udevice *dev;
	const struct ti_sci_desc *desc;
	struct ti_sci_handle handle;
	struct mbox_chan chan_tx;
	struct mbox_chan chan_rx;
	struct mbox_chan chan_notify;
	struct ti_sci_xfer xfer;
	struct list_head list;
	struct list_head dev_list;
	bool is_secure;
	u8 host_id;
	u8 seq;
};
99
/**
 * struct ti_sci_exclusive_dev - Bookkeeping for devices requested exclusively
 * @id:		TISCI device identifier
 * @count:	Number of outstanding exclusive get requests for this device
 * @list:	Link in the owning ti_sci_info's dev_list
 */
struct ti_sci_exclusive_dev {
	u32 id;
	u32 count;
	struct list_head list;
};
105
/* Map a client-visible TI SCI handle back to its enclosing instance data */
#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)
107
/**
 * ti_sci_setup_one_xfer() - Setup one message type
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @buf:	Buffer to be send to mailbox channel
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size. may be set to zero for send-only
 *		     transactions.
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Return: Corresponding ti_sci_xfer pointer if all went fine,
 *	   else appropriate error pointer.
 */
static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
						 u16 msg_type, u32 msg_flags,
						 u32 *buf,
						 size_t tx_message_size,
						 size_t rx_message_size)
{
	struct ti_sci_xfer *xfer = &info->xfer;
	struct ti_sci_msg_hdr *hdr;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    (rx_message_size > 0 && rx_message_size < sizeof(*hdr)) ||
	    tx_message_size < sizeof(*hdr)) {
		dev_err(info->dev, "TI-SCI message transfer size not sane\n");
		return ERR_PTR(-ERANGE);
	}

	/* Flip the sequence id per message; used to match the response */
	info->seq = ~info->seq;
	xfer->tx_message.buf = buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	/* The TI SCI header occupies the start of the caller's buffer */
	hdr = (struct ti_sci_msg_hdr *)buf;
	hdr->seq = info->seq;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}
156
/**
 * ti_sci_get_response() - Receive response from mailbox channel
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 * @chan:	Channel to receive the response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	   return corresponding error, else if all goes well,
 *	   return 0.
 */
static int ti_sci_get_response(struct ti_sci_info *info,
			       struct ti_sci_xfer *xfer,
			       struct mbox_chan *chan)
{
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	struct ti_sci_secure_msg_hdr *secure_hdr;
	struct ti_sci_msg_hdr *hdr;
	int ret;

	/* Receive the response (timeout is in ms, mbox_recv wants us) */
	ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
	if (ret) {
		dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* ToDo: Verify checksum */
	if (info->is_secure) {
		/* Skip past the secure-transport header prepended to the payload */
		secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
		msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
	}

	/* msg is updated by mailbox driver */
	hdr = (struct ti_sci_msg_hdr *)msg->buf;

	/* Sanity check for message response */
	if (hdr->seq != info->seq) {
		dev_dbg(info->dev, "%s: Message for %d is not expected\n",
			__func__, hdr->seq);
		/*
		 * NOTE(review): ret is 0 here, so a sequence mismatch is
		 * reported as success and the caller will read a response
		 * that was not meant for it — confirm whether this
		 * lenient behavior is intentional.
		 */
		return ret;
	}

	if (msg->len > info->desc->max_msg_size) {
		dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
			__func__, msg->len, info->desc->max_msg_size);
		return -EINVAL;
	}

	/* A short response is logged but not treated as fatal */
	if (msg->len < xfer->rx_len) {
		dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
			__func__, msg->len, xfer->rx_len);
	}

	return ret;
}
213
214/**
Andrew Davis04e43932022-07-25 20:25:06 -0500215 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
216 * @r: pointer to response buffer
217 *
218 * Return: true if the response was an ACK, else returns false.
219 */
220static bool ti_sci_is_response_ack(void *r)
221{
222 struct ti_sci_msg_hdr *hdr = r;
223
224 return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
225}
226
227/**
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530228 * ti_sci_do_xfer() - Do one transfer
229 * @info: Pointer to SCI entity information
230 * @xfer: Transfer to initiate and wait for response
231 *
232 * Return: 0 if all went fine, else return appropriate error.
233 */
Andrew Davisb3e71b72022-07-25 20:25:05 -0500234static int ti_sci_do_xfer(struct ti_sci_info *info,
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530235 struct ti_sci_xfer *xfer)
236{
237 struct k3_sec_proxy_msg *msg = &xfer->tx_message;
238 u8 secure_buf[info->desc->max_msg_size];
Dhruva Goled3341022024-01-30 20:29:59 +0530239 struct ti_sci_secure_msg_hdr *secure_hdr = (struct ti_sci_secure_msg_hdr *)secure_buf;
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530240 int ret;
241
Dhruva Gole5452ebd2024-01-30 20:30:00 +0530242 /*
243 * The reason why we need the is_secure code is because of boot R5.
244 * boot R5 starts off in "secure mode" when it hands off from Boot
245 * ROM over to the Secondary bootloader. The initial set of calls
246 * we have to make need to be on a secure pipe.
247 */
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530248 if (info->is_secure) {
249 /* ToDo: get checksum of the entire message */
Dhruva Goled3341022024-01-30 20:29:59 +0530250 secure_hdr->checksum = 0;
251 secure_hdr->reserved = 0;
252 memcpy(&secure_buf[sizeof(*secure_hdr)], xfer->tx_message.buf,
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530253 xfer->tx_message.len);
254
255 xfer->tx_message.buf = (u32 *)secure_buf;
Dhruva Goled3341022024-01-30 20:29:59 +0530256 xfer->tx_message.len += sizeof(*secure_hdr);
Andreas Dannenbergca08cb32019-06-07 19:24:40 +0530257
258 if (xfer->rx_len)
Dhruva Goled3341022024-01-30 20:29:59 +0530259 xfer->rx_len += sizeof(*secure_hdr);
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530260 }
261
262 /* Send the message */
263 ret = mbox_send(&info->chan_tx, msg);
264 if (ret) {
265 dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
266 __func__, ret);
267 return ret;
268 }
269
Andreas Dannenbergca08cb32019-06-07 19:24:40 +0530270 /* Get response if requested */
Andrew Davis04e43932022-07-25 20:25:06 -0500271 if (xfer->rx_len) {
Andreas Dannenbergca08cb32019-06-07 19:24:40 +0530272 ret = ti_sci_get_response(info, xfer, &info->chan_rx);
Andrew Davis04e43932022-07-25 20:25:06 -0500273 if (!ti_sci_is_response_ack(xfer->tx_message.buf)) {
Andreas Dannenberg831b73f2023-05-09 16:38:13 -0500274 dev_err(info->dev, "Message not acknowledged\n");
Andrew Davis04e43932022-07-25 20:25:06 -0500275 ret = -ENODEV;
276 }
277 }
Andreas Dannenbergca08cb32019-06-07 19:24:40 +0530278
279 return ret;
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530280}
281
282/**
283 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
284 * @handle: pointer to TI SCI handle
285 *
286 * Updates the SCI information in the internal data structure.
287 *
288 * Return: 0 if all went fine, else return appropriate error.
289 */
290static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
291{
292 struct ti_sci_msg_resp_version *rev_info;
293 struct ti_sci_version_info *ver;
294 struct ti_sci_msg_hdr hdr;
295 struct ti_sci_info *info;
296 struct ti_sci_xfer *xfer;
297 int ret;
298
299 if (IS_ERR(handle))
300 return PTR_ERR(handle);
301 if (!handle)
302 return -EINVAL;
303
304 info = handle_to_ti_sci_info(handle);
305
Andrew F. Davis8928fbd2019-04-29 09:04:11 -0400306 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
307 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530308 (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
309 sizeof(*rev_info));
310 if (IS_ERR(xfer)) {
311 ret = PTR_ERR(xfer);
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530312 return ret;
313 }
314
315 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -0500316 if (ret)
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530317 return ret;
Lokesh Vutla5af02db2018-08-27 15:57:32 +0530318
319 rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;
320
321 ver = &handle->version;
322 ver->abi_major = rev_info->abi_major;
323 ver->abi_minor = rev_info->abi_minor;
324 ver->firmware_revision = rev_info->firmware_revision;
325 strncpy(ver->firmware_description, rev_info->firmware_description,
326 sizeof(ver->firmware_description));
327
328 return 0;
329}
330
331/**
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +0530332 * cmd_set_board_config_using_msg() - Common command to send board configuration
333 * message
334 * @handle: pointer to TI SCI handle
335 * @msg_type: One of the TISCI message types to set board configuration
336 * @addr: Address where the board config structure is located
337 * @size: Size of the board config structure
338 *
339 * Return: 0 if all went well, else returns appropriate error value.
340 */
341static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
342 u16 msg_type, u64 addr, u32 size)
343{
344 struct ti_sci_msg_board_config req;
345 struct ti_sci_msg_hdr *resp;
346 struct ti_sci_info *info;
347 struct ti_sci_xfer *xfer;
348 int ret = 0;
349
350 if (IS_ERR(handle))
351 return PTR_ERR(handle);
352 if (!handle)
353 return -EINVAL;
354
355 info = handle_to_ti_sci_info(handle);
356
357 xfer = ti_sci_setup_one_xfer(info, msg_type,
358 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
359 (u32 *)&req, sizeof(req), sizeof(*resp));
360 if (IS_ERR(xfer)) {
361 ret = PTR_ERR(xfer);
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +0530362 return ret;
363 }
364 req.boardcfgp_high = (addr >> 32) & 0xffffffff;
365 req.boardcfgp_low = addr & 0xffffffff;
366 req.boardcfg_size = size;
367
368 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -0500369 if (ret)
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +0530370 return ret;
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +0530371
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +0530372 return ret;
373}
374
375/**
376 * ti_sci_cmd_set_board_config() - Command to send board configuration message
377 * @handle: pointer to TI SCI handle
378 * @addr: Address where the board config structure is located
379 * @size: Size of the board config structure
380 *
381 * Return: 0 if all went well, else returns appropriate error value.
382 */
383static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
384 u64 addr, u32 size)
385{
386 return cmd_set_board_config_using_msg(handle,
387 TI_SCI_MSG_BOARD_CONFIG,
388 addr, size);
389}
390
391/**
392 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
393 * management configuration
394 * @handle: pointer to TI SCI handle
395 * @addr: Address where the board RM config structure is located
396 * @size: Size of the RM config structure
397 *
398 * Return: 0 if all went well, else returns appropriate error value.
399 */
400static
401int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
402 u64 addr, u32 size)
403{
404 return cmd_set_board_config_using_msg(handle,
405 TI_SCI_MSG_BOARD_CONFIG_RM,
406 addr, size);
407}
408
409/**
410 * ti_sci_cmd_set_board_config_security() - Command to send board security
411 * configuration message
412 * @handle: pointer to TI SCI handle
413 * @addr: Address where the board security config structure is located
414 * @size: Size of the security config structure
415 *
416 * Return: 0 if all went well, else returns appropriate error value.
417 */
418static
419int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
420 u64 addr, u32 size)
421{
422 return cmd_set_board_config_using_msg(handle,
423 TI_SCI_MSG_BOARD_CONFIG_SECURITY,
424 addr, size);
425}
426
427/**
428 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
429 * configuration message
430 * @handle: pointer to TI SCI handle
431 * @addr: Address where the board PM config structure is located
432 * @size: Size of the PM config structure
433 *
434 * Return: 0 if all went well, else returns appropriate error value.
435 */
436static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
437 u64 addr, u32 size)
438{
439 return cmd_set_board_config_using_msg(handle,
440 TI_SCI_MSG_BOARD_CONFIG_PM,
441 addr, size);
442}
443
Lokesh Vutla0d0412a2019-06-07 19:24:41 +0530444static struct ti_sci_exclusive_dev
445*ti_sci_get_exclusive_dev(struct list_head *dev_list, u32 id)
446{
447 struct ti_sci_exclusive_dev *dev;
448
449 list_for_each_entry(dev, dev_list, list)
450 if (dev->id == id)
451 return dev;
452
453 return NULL;
454}
455
456static void ti_sci_add_exclusive_dev(struct ti_sci_info *info, u32 id)
457{
458 struct ti_sci_exclusive_dev *dev;
459
460 dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
461 if (dev) {
462 dev->count++;
463 return;
464 }
465
466 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
467 dev->id = id;
468 dev->count = 1;
469 INIT_LIST_HEAD(&dev->list);
470 list_add_tail(&dev->list, &info->dev_list);
471}
472
473static void ti_sci_delete_exclusive_dev(struct ti_sci_info *info, u32 id)
474{
475 struct ti_sci_exclusive_dev *dev;
476
477 dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
478 if (!dev)
479 return;
480
481 if (dev->count > 0)
482 dev->count--;
483}
484
/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;
	req.state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	/* Track exclusive ownership so it can be released in bulk later */
	if (state == MSG_DEVICE_SW_STATE_AUTO_OFF)
		ti_sci_delete_exclusive_dev(info, id);
	else if (flags & MSG_FLAG_DEVICE_EXCLUSIVE)
		ti_sci_add_exclusive_dev(info, id);

	return ret;
}
531
532/**
Andreas Dannenbergca08cb32019-06-07 19:24:40 +0530533 * ti_sci_set_device_state_no_wait() - Set device state helper without
534 * requesting or waiting for a response.
535 * @handle: pointer to TI SCI handle
536 * @id: Device identifier
537 * @flags: flags to setup for the device
538 * @state: State to move the device to
539 *
540 * Return: 0 if all went well, else returns appropriate error value.
541 */
542static int ti_sci_set_device_state_no_wait(const struct ti_sci_handle *handle,
543 u32 id, u32 flags, u8 state)
544{
545 struct ti_sci_msg_req_set_device_state req;
546 struct ti_sci_info *info;
547 struct ti_sci_xfer *xfer;
548 int ret = 0;
549
550 if (IS_ERR(handle))
551 return PTR_ERR(handle);
552 if (!handle)
553 return -EINVAL;
554
555 info = handle_to_ti_sci_info(handle);
556
557 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
558 flags | TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
559 (u32 *)&req, sizeof(req), 0);
560 if (IS_ERR(xfer)) {
561 ret = PTR_ERR(xfer);
Andreas Dannenbergca08cb32019-06-07 19:24:40 +0530562 return ret;
563 }
564 req.id = id;
565 req.state = state;
566
567 ret = ti_sci_do_xfer(info, xfer);
568 if (ret)
Andrew Davis771a16f2022-07-25 20:25:03 -0500569 return ret;
Andreas Dannenbergca08cb32019-06-07 19:24:40 +0530570
571 return ret;
572}
573
/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Any of the output pointers may be NULL when that value is not wanted,
 * but at least one must be non-NULL.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_msg_req_get_device_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	/* Reject a query where no result would be reported at all */
	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	/* Response is written in place over the transmit buffer */
	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;

	return ret;
}
631
632/**
633 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
634 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
635 * @id: Device Identifier
636 *
637 * Request for the device - NOTE: the client MUST maintain integrity of
638 * usage count by balancing get_device with put_device. No refcounting is
639 * managed by driver for that purpose.
640 *
641 * NOTE: The request is for exclusive access for the processor.
642 *
643 * Return: 0 if all went fine, else return appropriate error.
644 */
645static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
646{
Lokesh Vutlaf5613002019-06-07 19:24:39 +0530647 return ti_sci_set_device_state(handle, id, 0,
648 MSG_DEVICE_SW_STATE_ON);
649}
650
651static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
652 u32 id)
653{
654 return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +0530655 MSG_DEVICE_SW_STATE_ON);
656}
657
658/**
659 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
660 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
661 * @id: Device Identifier
662 *
663 * Request for the device - NOTE: the client MUST maintain integrity of
664 * usage count by balancing get_device with put_device. No refcounting is
665 * managed by driver for that purpose.
666 *
667 * Return: 0 if all went fine, else return appropriate error.
668 */
669static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
670{
671 return ti_sci_set_device_state(handle, id,
Lokesh Vutlaf5613002019-06-07 19:24:39 +0530672 0,
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +0530673 MSG_DEVICE_SW_STATE_RETENTION);
674}
675
Lokesh Vutlaf5613002019-06-07 19:24:39 +0530676static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
677 u32 id)
678{
679 return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
680 MSG_DEVICE_SW_STATE_RETENTION);
681}
682
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +0530683/**
684 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
685 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
686 * @id: Device Identifier
687 *
688 * Request for the device - NOTE: the client MUST maintain integrity of
689 * usage count by balancing get_device with put_device. No refcounting is
690 * managed by driver for that purpose.
691 *
692 * Return: 0 if all went fine, else return appropriate error.
693 */
694static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
695{
Lokesh Vutlaf5613002019-06-07 19:24:39 +0530696 return ti_sci_set_device_state(handle, id, 0,
697 MSG_DEVICE_SW_STATE_AUTO_OFF);
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +0530698}
699
Lokesh Vutla0d0412a2019-06-07 19:24:41 +0530700static
701int ti_sci_cmd_release_exclusive_devices(const struct ti_sci_handle *handle)
702{
703 struct ti_sci_exclusive_dev *dev, *tmp;
704 struct ti_sci_info *info;
705 int i, cnt;
706
707 info = handle_to_ti_sci_info(handle);
708
709 list_for_each_entry_safe(dev, tmp, &info->dev_list, list) {
710 cnt = dev->count;
711 debug("%s: id = %d, cnt = %d\n", __func__, dev->id, cnt);
712 for (i = 0; i < cnt; i++)
713 ti_sci_cmd_put_device(handle, dev->id);
714 }
715
716 return 0;
717}
718
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +0530719/**
720 * ti_sci_cmd_dev_is_valid() - Is the device valid
721 * @handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle
722 * @id: Device Identifier
723 *
724 * Return: 0 if all went fine and the device ID is valid, else return
725 * appropriate error.
726 */
727static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
728{
729 u8 unused;
730
731 /* check the device state which will also tell us if the ID is valid */
732 return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
733}
734
735/**
736 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
737 * @handle: Pointer to TISCI handle
738 * @id: Device Identifier
739 * @count: Pointer to Context Loss counter to populate
740 *
741 * Return: 0 if all went fine, else return appropriate error.
742 */
743static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
744 u32 *count)
745{
746 return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
747}
748
749/**
750 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
751 * @handle: Pointer to TISCI handle
752 * @id: Device Identifier
753 * @r_state: true if requested to be idle
754 *
755 * Return: 0 if all went fine, else return appropriate error.
756 */
757static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
758 bool *r_state)
759{
760 int ret;
761 u8 state;
762
763 if (!r_state)
764 return -EINVAL;
765
766 ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
767 if (ret)
768 return ret;
769
770 *r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);
771
772 return 0;
773}
774
775/**
776 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
777 * @handle: Pointer to TISCI handle
778 * @id: Device Identifier
779 * @r_state: true if requested to be stopped
780 * @curr_state: true if currently stopped.
781 *
782 * Return: 0 if all went fine, else return appropriate error.
783 */
784static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
785 bool *r_state, bool *curr_state)
786{
787 int ret;
788 u8 p_state, c_state;
789
790 if (!r_state && !curr_state)
791 return -EINVAL;
792
793 ret =
794 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
795 if (ret)
796 return ret;
797
798 if (r_state)
799 *r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
800 if (curr_state)
801 *curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);
802
803 return 0;
804}
805
806/**
807 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
808 * @handle: Pointer to TISCI handle
809 * @id: Device Identifier
810 * @r_state: true if requested to be ON
811 * @curr_state: true if currently ON and active
812 *
813 * Return: 0 if all went fine, else return appropriate error.
814 */
815static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
816 bool *r_state, bool *curr_state)
817{
818 int ret;
819 u8 p_state, c_state;
820
821 if (!r_state && !curr_state)
822 return -EINVAL;
823
824 ret =
825 ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
826 if (ret)
827 return ret;
828
829 if (r_state)
830 *r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
831 if (curr_state)
832 *curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);
833
834 return 0;
835}
836
837/**
838 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
839 * @handle: Pointer to TISCI handle
840 * @id: Device Identifier
841 * @curr_state: true if currently transitioning.
842 *
843 * Return: 0 if all went fine, else return appropriate error.
844 */
845static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
846 bool *curr_state)
847{
848 int ret;
849 u8 state;
850
851 if (!curr_state)
852 return -EINVAL;
853
854 ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
855 if (ret)
856 return ret;
857
858 *curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);
859
860 return 0;
861}
862
/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_msg_req_set_device_resets req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;
	req.resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}
904
905/**
906 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
907 * by TISCI
908 * @handle: Pointer to TISCI handle
909 * @id: Device Identifier
910 * @reset_state: Pointer to reset state to populate
911 *
912 * Return: 0 if all went fine, else return appropriate error.
913 */
914static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
915 u32 id, u32 *reset_state)
916{
917 return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
918 NULL);
919}
920
/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has it's own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_clock_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}
967
968/**
969 * ti_sci_cmd_get_clock_state() - Get clock state helper
970 * @handle: pointer to TI SCI handle
971 * @dev_id: Device identifier this request is for
972 * @clk_id: Clock identifier for the device for this request.
973 * Each device has it's own set of clock inputs. This indexes
974 * which clock input to modify.
975 * @programmed_state: State requested for clock to move to
976 * @current_state: State that the clock is currently in
977 *
978 * Return: 0 if all went well, else returns appropriate error value.
979 */
980static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
981 u32 dev_id, u8 clk_id,
982 u8 *programmed_state, u8 *current_state)
983{
984 struct ti_sci_msg_resp_get_clock_state *resp;
985 struct ti_sci_msg_req_get_clock_state req;
986 struct ti_sci_info *info;
987 struct ti_sci_xfer *xfer;
988 int ret = 0;
989
990 if (IS_ERR(handle))
991 return PTR_ERR(handle);
992 if (!handle)
993 return -EINVAL;
994
995 if (!programmed_state && !current_state)
996 return -EINVAL;
997
998 info = handle_to_ti_sci_info(handle);
999
1000 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
1001 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1002 (u32 *)&req, sizeof(req), sizeof(*resp));
1003 if (IS_ERR(xfer)) {
1004 ret = PTR_ERR(xfer);
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301005 return ret;
1006 }
1007 req.dev_id = dev_id;
1008 req.clk_id = clk_id;
1009
1010 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001011 if (ret)
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301012 return ret;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301013
1014 resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;
1015
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301016 if (programmed_state)
1017 *programmed_state = resp->programmed_state;
1018 if (current_state)
1019 *current_state = resp->current_state;
1020
1021 return ret;
1022}
1023
1024/**
1025 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
1026 * @handle: pointer to TI SCI handle
1027 * @dev_id: Device identifier this request is for
1028 * @clk_id: Clock identifier for the device for this request.
1029 * Each device has it's own set of clock inputs. This indexes
1030 * which clock input to modify.
1031 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
1032 * @can_change_freq: 'true' if frequency change is desired, else 'false'
1033 * @enable_input_term: 'true' if input termination is desired, else 'false'
1034 *
1035 * Return: 0 if all went well, else returns appropriate error value.
1036 */
1037static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
1038 u8 clk_id, bool needs_ssc, bool can_change_freq,
1039 bool enable_input_term)
1040{
1041 u32 flags = 0;
1042
1043 flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
1044 flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
1045 flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;
1046
1047 return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
1048 MSG_CLOCK_SW_STATE_REQ);
1049}
1050
1051/**
1052 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
1053 * @handle: pointer to TI SCI handle
1054 * @dev_id: Device identifier this request is for
1055 * @clk_id: Clock identifier for the device for this request.
1056 * Each device has it's own set of clock inputs. This indexes
1057 * which clock input to modify.
1058 *
1059 * NOTE: This clock must have been requested by get_clock previously.
1060 *
1061 * Return: 0 if all went well, else returns appropriate error value.
1062 */
1063static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
1064 u32 dev_id, u8 clk_id)
1065{
1066 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1067 MSG_CLOCK_SW_STATE_UNREQ);
1068}
1069
1070/**
1071 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
1072 * @handle: pointer to TI SCI handle
1073 * @dev_id: Device identifier this request is for
1074 * @clk_id: Clock identifier for the device for this request.
1075 * Each device has it's own set of clock inputs. This indexes
1076 * which clock input to modify.
1077 *
1078 * NOTE: This clock must have been requested by get_clock previously.
1079 *
1080 * Return: 0 if all went well, else returns appropriate error value.
1081 */
1082static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
1083 u32 dev_id, u8 clk_id)
1084{
1085 return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
1086 MSG_CLOCK_SW_STATE_AUTO);
1087}
1088
1089/**
1090 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
1091 * @handle: pointer to TI SCI handle
1092 * @dev_id: Device identifier this request is for
1093 * @clk_id: Clock identifier for the device for this request.
1094 * Each device has it's own set of clock inputs. This indexes
1095 * which clock input to modify.
1096 * @req_state: state indicating if the clock is auto managed
1097 *
1098 * Return: 0 if all went well, else returns appropriate error value.
1099 */
1100static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
1101 u32 dev_id, u8 clk_id, bool *req_state)
1102{
1103 u8 state = 0;
1104 int ret;
1105
1106 if (!req_state)
1107 return -EINVAL;
1108
1109 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
1110 if (ret)
1111 return ret;
1112
1113 *req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
1114 return 0;
1115}
1116
1117/**
1118 * ti_sci_cmd_clk_is_on() - Is the clock ON
1119 * @handle: pointer to TI SCI handle
1120 * @dev_id: Device identifier this request is for
1121 * @clk_id: Clock identifier for the device for this request.
1122 * Each device has it's own set of clock inputs. This indexes
1123 * which clock input to modify.
1124 * @req_state: state indicating if the clock is managed by us and enabled
1125 * @curr_state: state indicating if the clock is ready for operation
1126 *
1127 * Return: 0 if all went well, else returns appropriate error value.
1128 */
1129static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
1130 u8 clk_id, bool *req_state, bool *curr_state)
1131{
1132 u8 c_state = 0, r_state = 0;
1133 int ret;
1134
1135 if (!req_state && !curr_state)
1136 return -EINVAL;
1137
1138 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1139 &r_state, &c_state);
1140 if (ret)
1141 return ret;
1142
1143 if (req_state)
1144 *req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
1145 if (curr_state)
1146 *curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
1147 return 0;
1148}
1149
1150/**
1151 * ti_sci_cmd_clk_is_off() - Is the clock OFF
1152 * @handle: pointer to TI SCI handle
1153 * @dev_id: Device identifier this request is for
1154 * @clk_id: Clock identifier for the device for this request.
1155 * Each device has it's own set of clock inputs. This indexes
1156 * which clock input to modify.
1157 * @req_state: state indicating if the clock is managed by us and disabled
1158 * @curr_state: state indicating if the clock is NOT ready for operation
1159 *
1160 * Return: 0 if all went well, else returns appropriate error value.
1161 */
1162static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
1163 u8 clk_id, bool *req_state, bool *curr_state)
1164{
1165 u8 c_state = 0, r_state = 0;
1166 int ret;
1167
1168 if (!req_state && !curr_state)
1169 return -EINVAL;
1170
1171 ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
1172 &r_state, &c_state);
1173 if (ret)
1174 return ret;
1175
1176 if (req_state)
1177 *req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
1178 if (curr_state)
1179 *curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
1180 return 0;
1181}
1182
1183/**
1184 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
1185 * @handle: pointer to TI SCI handle
1186 * @dev_id: Device identifier this request is for
1187 * @clk_id: Clock identifier for the device for this request.
1188 * Each device has it's own set of clock inputs. This indexes
1189 * which clock input to modify.
1190 * @parent_id: Parent clock identifier to set
1191 *
1192 * Return: 0 if all went well, else returns appropriate error value.
1193 */
1194static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
1195 u32 dev_id, u8 clk_id, u8 parent_id)
1196{
1197 struct ti_sci_msg_req_set_clock_parent req;
1198 struct ti_sci_msg_hdr *resp;
1199 struct ti_sci_info *info;
1200 struct ti_sci_xfer *xfer;
1201 int ret = 0;
1202
1203 if (IS_ERR(handle))
1204 return PTR_ERR(handle);
1205 if (!handle)
1206 return -EINVAL;
1207
1208 info = handle_to_ti_sci_info(handle);
1209
1210 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
1211 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1212 (u32 *)&req, sizeof(req), sizeof(*resp));
1213 if (IS_ERR(xfer)) {
1214 ret = PTR_ERR(xfer);
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301215 return ret;
1216 }
1217 req.dev_id = dev_id;
1218 req.clk_id = clk_id;
1219 req.parent_id = parent_id;
1220
1221 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001222 if (ret)
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301223 return ret;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301224
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301225 return ret;
1226}
1227
1228/**
1229 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
1230 * @handle: pointer to TI SCI handle
1231 * @dev_id: Device identifier this request is for
1232 * @clk_id: Clock identifier for the device for this request.
1233 * Each device has it's own set of clock inputs. This indexes
1234 * which clock input to modify.
1235 * @parent_id: Current clock parent
1236 *
1237 * Return: 0 if all went well, else returns appropriate error value.
1238 */
1239static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
1240 u32 dev_id, u8 clk_id, u8 *parent_id)
1241{
1242 struct ti_sci_msg_resp_get_clock_parent *resp;
1243 struct ti_sci_msg_req_get_clock_parent req;
1244 struct ti_sci_info *info;
1245 struct ti_sci_xfer *xfer;
1246 int ret = 0;
1247
1248 if (IS_ERR(handle))
1249 return PTR_ERR(handle);
1250 if (!handle || !parent_id)
1251 return -EINVAL;
1252
1253 info = handle_to_ti_sci_info(handle);
1254
1255 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
1256 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1257 (u32 *)&req, sizeof(req), sizeof(*resp));
1258 if (IS_ERR(xfer)) {
1259 ret = PTR_ERR(xfer);
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301260 return ret;
1261 }
1262 req.dev_id = dev_id;
1263 req.clk_id = clk_id;
1264
1265 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001266 if (ret)
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301267 return ret;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301268
Andrew Davis04e43932022-07-25 20:25:06 -05001269 *parent_id = resp->parent_id;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301270
1271 return ret;
1272}
1273
1274/**
1275 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
1276 * @handle: pointer to TI SCI handle
1277 * @dev_id: Device identifier this request is for
1278 * @clk_id: Clock identifier for the device for this request.
1279 * Each device has it's own set of clock inputs. This indexes
1280 * which clock input to modify.
1281 * @num_parents: Returns he number of parents to the current clock.
1282 *
1283 * Return: 0 if all went well, else returns appropriate error value.
1284 */
1285static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
1286 u32 dev_id, u8 clk_id,
1287 u8 *num_parents)
1288{
1289 struct ti_sci_msg_resp_get_clock_num_parents *resp;
1290 struct ti_sci_msg_req_get_clock_num_parents req;
1291 struct ti_sci_info *info;
1292 struct ti_sci_xfer *xfer;
1293 int ret = 0;
1294
1295 if (IS_ERR(handle))
1296 return PTR_ERR(handle);
1297 if (!handle || !num_parents)
1298 return -EINVAL;
1299
1300 info = handle_to_ti_sci_info(handle);
1301
1302 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
1303 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1304 (u32 *)&req, sizeof(req), sizeof(*resp));
1305 if (IS_ERR(xfer)) {
1306 ret = PTR_ERR(xfer);
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301307 return ret;
1308 }
1309 req.dev_id = dev_id;
1310 req.clk_id = clk_id;
1311
1312 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001313 if (ret)
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301314 return ret;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301315
1316 resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
1317 xfer->tx_message.buf;
1318
Andrew Davis04e43932022-07-25 20:25:06 -05001319 *num_parents = resp->num_parents;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301320
1321 return ret;
1322}
1323
1324/**
1325 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
1326 * @handle: pointer to TI SCI handle
1327 * @dev_id: Device identifier this request is for
1328 * @clk_id: Clock identifier for the device for this request.
1329 * Each device has it's own set of clock inputs. This indexes
1330 * which clock input to modify.
1331 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1332 * allowable programmed frequency and does not account for clock
1333 * tolerances and jitter.
1334 * @target_freq: The target clock frequency in Hz. A frequency will be
1335 * processed as close to this target frequency as possible.
1336 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1337 * allowable programmed frequency and does not account for clock
1338 * tolerances and jitter.
1339 * @match_freq: Frequency match in Hz response.
1340 *
1341 * Return: 0 if all went well, else returns appropriate error value.
1342 */
1343static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
1344 u32 dev_id, u8 clk_id, u64 min_freq,
1345 u64 target_freq, u64 max_freq,
1346 u64 *match_freq)
1347{
1348 struct ti_sci_msg_resp_query_clock_freq *resp;
1349 struct ti_sci_msg_req_query_clock_freq req;
1350 struct ti_sci_info *info;
1351 struct ti_sci_xfer *xfer;
1352 int ret = 0;
1353
1354 if (IS_ERR(handle))
1355 return PTR_ERR(handle);
1356 if (!handle || !match_freq)
1357 return -EINVAL;
1358
1359 info = handle_to_ti_sci_info(handle);
1360
1361 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
1362 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1363 (u32 *)&req, sizeof(req), sizeof(*resp));
1364 if (IS_ERR(xfer)) {
1365 ret = PTR_ERR(xfer);
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301366 return ret;
1367 }
1368 req.dev_id = dev_id;
1369 req.clk_id = clk_id;
1370 req.min_freq_hz = min_freq;
1371 req.target_freq_hz = target_freq;
1372 req.max_freq_hz = max_freq;
1373
1374 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001375 if (ret)
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301376 return ret;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301377
1378 resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;
1379
Andrew Davis04e43932022-07-25 20:25:06 -05001380 *match_freq = resp->freq_hz;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301381
1382 return ret;
1383}
1384
1385/**
1386 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
1387 * @handle: pointer to TI SCI handle
1388 * @dev_id: Device identifier this request is for
1389 * @clk_id: Clock identifier for the device for this request.
1390 * Each device has it's own set of clock inputs. This indexes
1391 * which clock input to modify.
1392 * @min_freq: The minimum allowable frequency in Hz. This is the minimum
1393 * allowable programmed frequency and does not account for clock
1394 * tolerances and jitter.
1395 * @target_freq: The target clock frequency in Hz. A frequency will be
1396 * processed as close to this target frequency as possible.
1397 * @max_freq: The maximum allowable frequency in Hz. This is the maximum
1398 * allowable programmed frequency and does not account for clock
1399 * tolerances and jitter.
1400 *
1401 * Return: 0 if all went well, else returns appropriate error value.
1402 */
1403static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
1404 u32 dev_id, u8 clk_id, u64 min_freq,
1405 u64 target_freq, u64 max_freq)
1406{
1407 struct ti_sci_msg_req_set_clock_freq req;
1408 struct ti_sci_msg_hdr *resp;
1409 struct ti_sci_info *info;
1410 struct ti_sci_xfer *xfer;
1411 int ret = 0;
1412
1413 if (IS_ERR(handle))
1414 return PTR_ERR(handle);
1415 if (!handle)
1416 return -EINVAL;
1417
1418 info = handle_to_ti_sci_info(handle);
1419
1420 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
1421 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1422 (u32 *)&req, sizeof(req), sizeof(*resp));
1423 if (IS_ERR(xfer)) {
1424 ret = PTR_ERR(xfer);
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301425 return ret;
1426 }
1427 req.dev_id = dev_id;
1428 req.clk_id = clk_id;
1429 req.min_freq_hz = min_freq;
1430 req.target_freq_hz = target_freq;
1431 req.max_freq_hz = max_freq;
1432
1433 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001434 if (ret)
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301435 return ret;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301436
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301437 return ret;
1438}
1439
1440/**
1441 * ti_sci_cmd_clk_get_freq() - Get current frequency
1442 * @handle: pointer to TI SCI handle
1443 * @dev_id: Device identifier this request is for
1444 * @clk_id: Clock identifier for the device for this request.
1445 * Each device has it's own set of clock inputs. This indexes
1446 * which clock input to modify.
1447 * @freq: Currently frequency in Hz
1448 *
1449 * Return: 0 if all went well, else returns appropriate error value.
1450 */
1451static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
1452 u32 dev_id, u8 clk_id, u64 *freq)
1453{
1454 struct ti_sci_msg_resp_get_clock_freq *resp;
1455 struct ti_sci_msg_req_get_clock_freq req;
1456 struct ti_sci_info *info;
1457 struct ti_sci_xfer *xfer;
1458 int ret = 0;
1459
1460 if (IS_ERR(handle))
1461 return PTR_ERR(handle);
1462 if (!handle || !freq)
1463 return -EINVAL;
1464
1465 info = handle_to_ti_sci_info(handle);
1466
1467 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
1468 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1469 (u32 *)&req, sizeof(req), sizeof(*resp));
1470 if (IS_ERR(xfer)) {
1471 ret = PTR_ERR(xfer);
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301472 return ret;
1473 }
1474 req.dev_id = dev_id;
1475 req.clk_id = clk_id;
1476
1477 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001478 if (ret)
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301479 return ret;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301480
1481 resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;
1482
Andrew Davis04e43932022-07-25 20:25:06 -05001483 *freq = resp->freq_hz;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05301484
1485 return ret;
1486}
1487
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05301488/**
1489 * ti_sci_cmd_core_reboot() - Command to request system reset
1490 * @handle: pointer to TI SCI handle
1491 *
1492 * Return: 0 if all went well, else returns appropriate error value.
1493 */
1494static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
1495{
1496 struct ti_sci_msg_req_reboot req;
1497 struct ti_sci_msg_hdr *resp;
1498 struct ti_sci_info *info;
1499 struct ti_sci_xfer *xfer;
1500 int ret = 0;
1501
1502 if (IS_ERR(handle))
1503 return PTR_ERR(handle);
1504 if (!handle)
1505 return -EINVAL;
1506
1507 info = handle_to_ti_sci_info(handle);
1508
1509 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
1510 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1511 (u32 *)&req, sizeof(req), sizeof(*resp));
1512 if (IS_ERR(xfer)) {
1513 ret = PTR_ERR(xfer);
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05301514 return ret;
1515 }
Dave Gerlach366df4e2021-05-13 20:10:55 -05001516 req.domain = 0;
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05301517
1518 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001519 if (ret)
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05301520 return ret;
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05301521
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05301522 return ret;
1523}
1524
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301525/**
1526 * ti_sci_get_resource_range - Helper to get a range of resources assigned
1527 * to a host. Resource is uniquely identified by
1528 * type and subtype.
1529 * @handle: Pointer to TISCI handle.
1530 * @dev_id: TISCI device ID.
1531 * @subtype: Resource assignment subtype that is being requested
1532 * from the given device.
1533 * @s_host: Host processor ID to which the resources are allocated
1534 * @range_start: Start index of the resource range
1535 * @range_num: Number of resources in the range
1536 *
1537 * Return: 0 if all went fine, else return appropriate error.
1538 */
1539static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
1540 u32 dev_id, u8 subtype, u8 s_host,
1541 u16 *range_start, u16 *range_num)
1542{
1543 struct ti_sci_msg_resp_get_resource_range *resp;
1544 struct ti_sci_msg_req_get_resource_range req;
1545 struct ti_sci_xfer *xfer;
1546 struct ti_sci_info *info;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301547 int ret = 0;
1548
1549 if (IS_ERR(handle))
1550 return PTR_ERR(handle);
1551 if (!handle)
1552 return -EINVAL;
1553
1554 info = handle_to_ti_sci_info(handle);
1555
1556 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
1557 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1558 (u32 *)&req, sizeof(req), sizeof(*resp));
1559 if (IS_ERR(xfer)) {
1560 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301561 return ret;
1562 }
1563
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301564 req.secondary_host = s_host;
Lokesh Vutla0acf1dc2020-08-17 11:00:48 +05301565 req.type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301566 req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;
1567
1568 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001569 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301570 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301571
1572 resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
Andrew Davis04e43932022-07-25 20:25:06 -05001573 if (!resp->range_start && !resp->range_num) {
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301574 ret = -ENODEV;
1575 } else {
1576 *range_start = resp->range_start;
1577 *range_num = resp->range_num;
1578 };
1579
1580fail:
1581 return ret;
1582}
1583
Vignesh Raghavendra4214a812021-06-07 19:47:48 +05301584static int __maybe_unused
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05301585ti_sci_cmd_get_resource_range_static(const struct ti_sci_handle *handle,
1586 u32 dev_id, u8 subtype,
1587 u16 *range_start, u16 *range_num)
Vignesh Raghavendra4214a812021-06-07 19:47:48 +05301588{
1589 struct ti_sci_resource_static_data *data;
1590 int i = 0;
1591
1592 while (1) {
1593 data = &rm_static_data[i];
1594
1595 if (!data->dev_id)
1596 return -EINVAL;
1597
1598 if (data->dev_id != dev_id || data->subtype != subtype) {
1599 i++;
1600 continue;
1601 }
1602
1603 *range_start = data->range_start;
1604 *range_num = data->range_num;
1605
1606 return 0;
1607 }
1608
1609 return -EINVAL;
1610}
1611
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301612/**
1613 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1614 * that is same as ti sci interface host.
1615 * @handle: Pointer to TISCI handle.
1616 * @dev_id: TISCI device ID.
1617 * @subtype: Resource assignment subtype that is being requested
1618 * from the given device.
1619 * @range_start: Start index of the resource range
1620 * @range_num: Number of resources in the range
1621 *
1622 * Return: 0 if all went fine, else return appropriate error.
1623 */
1624static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1625 u32 dev_id, u8 subtype,
1626 u16 *range_start, u16 *range_num)
1627{
1628 return ti_sci_get_resource_range(handle, dev_id, subtype,
1629 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1630 range_start, range_num);
1631}
1632
1633/**
1634 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1635 * assigned to a specified host.
1636 * @handle: Pointer to TISCI handle.
1637 * @dev_id: TISCI device ID.
1638 * @subtype: Resource assignment subtype that is being requested
1639 * from the given device.
1640 * @s_host: Host processor ID to which the resources are allocated
1641 * @range_start: Start index of the resource range
1642 * @range_num: Number of resources in the range
1643 *
1644 * Return: 0 if all went fine, else return appropriate error.
1645 */
1646static
1647int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1648 u32 dev_id, u8 subtype, u8 s_host,
1649 u16 *range_start, u16 *range_num)
1650{
1651 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1652 range_start, range_num);
1653}
1654
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301655/**
Lokesh Vutla032dce82019-03-08 11:47:32 +05301656 * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
1657 * @handle: pointer to TI SCI handle
1658 * @msms_start: MSMC start as returned by tisci
1659 * @msmc_end: MSMC end as returned by tisci
1660 *
1661 * Return: 0 if all went well, else returns appropriate error value.
1662 */
1663static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1664 u64 *msmc_start, u64 *msmc_end)
1665{
1666 struct ti_sci_msg_resp_query_msmc *resp;
1667 struct ti_sci_msg_hdr req;
1668 struct ti_sci_info *info;
1669 struct ti_sci_xfer *xfer;
1670 int ret = 0;
1671
1672 if (IS_ERR(handle))
1673 return PTR_ERR(handle);
1674 if (!handle)
1675 return -EINVAL;
1676
1677 info = handle_to_ti_sci_info(handle);
1678
1679 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1680 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1681 (u32 *)&req, sizeof(req), sizeof(*resp));
1682 if (IS_ERR(xfer)) {
1683 ret = PTR_ERR(xfer);
Lokesh Vutla032dce82019-03-08 11:47:32 +05301684 return ret;
1685 }
1686
1687 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001688 if (ret)
Lokesh Vutla032dce82019-03-08 11:47:32 +05301689 return ret;
Lokesh Vutla032dce82019-03-08 11:47:32 +05301690
1691 resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1692
Lokesh Vutla032dce82019-03-08 11:47:32 +05301693 *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1694 resp->msmc_start_low;
1695 *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1696 resp->msmc_end_low;
1697
1698 return ret;
1699}
1700
1701/**
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301702 * ti_sci_cmd_proc_request() - Command to request a physical processor control
1703 * @handle: Pointer to TI SCI handle
1704 * @proc_id: Processor ID this request is for
1705 *
1706 * Return: 0 if all went well, else returns appropriate error value.
1707 */
1708static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1709 u8 proc_id)
1710{
1711 struct ti_sci_msg_req_proc_request req;
1712 struct ti_sci_msg_hdr *resp;
1713 struct ti_sci_info *info;
1714 struct ti_sci_xfer *xfer;
1715 int ret = 0;
1716
1717 if (IS_ERR(handle))
1718 return PTR_ERR(handle);
1719 if (!handle)
1720 return -EINVAL;
1721
1722 info = handle_to_ti_sci_info(handle);
1723
1724 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1725 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1726 (u32 *)&req, sizeof(req), sizeof(*resp));
1727 if (IS_ERR(xfer)) {
1728 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301729 return ret;
1730 }
1731 req.processor_id = proc_id;
1732
1733 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001734 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301735 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301736
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301737 return ret;
1738}
1739
1740/**
1741 * ti_sci_cmd_proc_release() - Command to release a physical processor control
1742 * @handle: Pointer to TI SCI handle
1743 * @proc_id: Processor ID this request is for
1744 *
1745 * Return: 0 if all went well, else returns appropriate error value.
1746 */
1747static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1748 u8 proc_id)
1749{
1750 struct ti_sci_msg_req_proc_release req;
1751 struct ti_sci_msg_hdr *resp;
1752 struct ti_sci_info *info;
1753 struct ti_sci_xfer *xfer;
1754 int ret = 0;
1755
1756 if (IS_ERR(handle))
1757 return PTR_ERR(handle);
1758 if (!handle)
1759 return -EINVAL;
1760
1761 info = handle_to_ti_sci_info(handle);
1762
1763 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1764 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1765 (u32 *)&req, sizeof(req), sizeof(*resp));
1766 if (IS_ERR(xfer)) {
1767 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301768 return ret;
1769 }
1770 req.processor_id = proc_id;
1771
1772 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001773 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301774 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301775
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301776 return ret;
1777}
1778
1779/**
1780 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
1781 * control to a host in the processor's access
1782 * control list.
1783 * @handle: Pointer to TI SCI handle
1784 * @proc_id: Processor ID this request is for
1785 * @host_id: Host ID to get the control of the processor
1786 *
1787 * Return: 0 if all went well, else returns appropriate error value.
1788 */
1789static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1790 u8 proc_id, u8 host_id)
1791{
1792 struct ti_sci_msg_req_proc_handover req;
1793 struct ti_sci_msg_hdr *resp;
1794 struct ti_sci_info *info;
1795 struct ti_sci_xfer *xfer;
1796 int ret = 0;
1797
1798 if (IS_ERR(handle))
1799 return PTR_ERR(handle);
1800 if (!handle)
1801 return -EINVAL;
1802
1803 info = handle_to_ti_sci_info(handle);
1804
1805 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1806 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1807 (u32 *)&req, sizeof(req), sizeof(*resp));
1808 if (IS_ERR(xfer)) {
1809 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301810 return ret;
1811 }
1812 req.processor_id = proc_id;
1813 req.host_id = host_id;
1814
1815 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001816 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301817 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301818
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301819 return ret;
1820}
1821
1822/**
1823 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1824 * configuration flags
1825 * @handle: Pointer to TI SCI handle
1826 * @proc_id: Processor ID this request is for
1827 * @config_flags_set: Configuration flags to be set
1828 * @config_flags_clear: Configuration flags to be cleared.
1829 *
1830 * Return: 0 if all went well, else returns appropriate error value.
1831 */
1832static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1833 u8 proc_id, u64 bootvector,
1834 u32 config_flags_set,
1835 u32 config_flags_clear)
1836{
1837 struct ti_sci_msg_req_set_proc_boot_config req;
1838 struct ti_sci_msg_hdr *resp;
1839 struct ti_sci_info *info;
1840 struct ti_sci_xfer *xfer;
1841 int ret = 0;
1842
1843 if (IS_ERR(handle))
1844 return PTR_ERR(handle);
1845 if (!handle)
1846 return -EINVAL;
1847
1848 info = handle_to_ti_sci_info(handle);
1849
1850 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1851 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1852 (u32 *)&req, sizeof(req), sizeof(*resp));
1853 if (IS_ERR(xfer)) {
1854 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301855 return ret;
1856 }
1857 req.processor_id = proc_id;
1858 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1859 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1860 TISCI_ADDR_HIGH_SHIFT;
1861 req.config_flags_set = config_flags_set;
1862 req.config_flags_clear = config_flags_clear;
1863
1864 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001865 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301866 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301867
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301868 return ret;
1869}
1870
1871/**
1872 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1873 * control flags
1874 * @handle: Pointer to TI SCI handle
1875 * @proc_id: Processor ID this request is for
1876 * @control_flags_set: Control flags to be set
1877 * @control_flags_clear: Control flags to be cleared
1878 *
1879 * Return: 0 if all went well, else returns appropriate error value.
1880 */
1881static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
1882 u8 proc_id, u32 control_flags_set,
1883 u32 control_flags_clear)
1884{
1885 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1886 struct ti_sci_msg_hdr *resp;
1887 struct ti_sci_info *info;
1888 struct ti_sci_xfer *xfer;
1889 int ret = 0;
1890
1891 if (IS_ERR(handle))
1892 return PTR_ERR(handle);
1893 if (!handle)
1894 return -EINVAL;
1895
1896 info = handle_to_ti_sci_info(handle);
1897
1898 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
1899 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1900 (u32 *)&req, sizeof(req), sizeof(*resp));
1901 if (IS_ERR(xfer)) {
1902 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301903 return ret;
1904 }
1905 req.processor_id = proc_id;
1906 req.control_flags_set = control_flags_set;
1907 req.control_flags_clear = control_flags_clear;
1908
1909 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001910 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301911 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301912
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301913 return ret;
1914}
1915
1916/**
1917 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
1918 * image and then set the processor configuration flags.
1919 * @handle: Pointer to TI SCI handle
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001920 * @image_addr: Memory address at which payload image and certificate is
1921 * located in memory, this is updated if the image data is
1922 * moved during authentication.
1923 * @image_size: This is updated with the final size of the image after
1924 * authentication.
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301925 *
1926 * Return: 0 if all went well, else returns appropriate error value.
1927 */
1928static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001929 u64 *image_addr, u32 *image_size)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301930{
1931 struct ti_sci_msg_req_proc_auth_boot_image req;
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001932 struct ti_sci_msg_resp_proc_auth_boot_image *resp;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301933 struct ti_sci_info *info;
1934 struct ti_sci_xfer *xfer;
1935 int ret = 0;
1936
1937 if (IS_ERR(handle))
1938 return PTR_ERR(handle);
1939 if (!handle)
1940 return -EINVAL;
1941
1942 info = handle_to_ti_sci_info(handle);
1943
Jorge Ramirez-Ortizb0373282023-01-10 18:29:48 +01001944 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMAGE,
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301945 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1946 (u32 *)&req, sizeof(req), sizeof(*resp));
1947 if (IS_ERR(xfer)) {
1948 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301949 return ret;
1950 }
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001951 req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
1952 req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301953 TISCI_ADDR_HIGH_SHIFT;
1954
1955 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001956 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301957 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301958
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001959 resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301960
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001961 *image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
1962 (((u64)resp->image_addr_high <<
1963 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
1964 *image_size = resp->image_size;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301965
1966 return ret;
1967}
1968
1969/**
1970 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
1971 * @handle: Pointer to TI SCI handle
1972 * @proc_id: Processor ID this request is for
1973 *
1974 * Return: 0 if all went well, else returns appropriate error value.
1975 */
1976static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
1977 u8 proc_id, u64 *bv, u32 *cfg_flags,
1978 u32 *ctrl_flags, u32 *sts_flags)
1979{
1980 struct ti_sci_msg_resp_get_proc_boot_status *resp;
1981 struct ti_sci_msg_req_get_proc_boot_status req;
1982 struct ti_sci_info *info;
1983 struct ti_sci_xfer *xfer;
1984 int ret = 0;
1985
1986 if (IS_ERR(handle))
1987 return PTR_ERR(handle);
1988 if (!handle)
1989 return -EINVAL;
1990
1991 info = handle_to_ti_sci_info(handle);
1992
1993 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
1994 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1995 (u32 *)&req, sizeof(req), sizeof(*resp));
1996 if (IS_ERR(xfer)) {
1997 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301998 return ret;
1999 }
2000 req.processor_id = proc_id;
2001
2002 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002003 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302004 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302005
2006 resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
2007 xfer->tx_message.buf;
2008
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302009 *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
2010 (((u64)resp->bootvector_high <<
2011 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2012 *cfg_flags = resp->config_flags;
2013 *ctrl_flags = resp->control_flags;
2014 *sts_flags = resp->status_flags;
2015
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302016 return ret;
2017}
2018
2019/**
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302020 * ti_sci_proc_wait_boot_status_no_wait() - Helper function to wait for a
2021 * processor boot status without requesting or
2022 * waiting for a response.
2023 * @proc_id: Processor ID this request is for
2024 * @num_wait_iterations: Total number of iterations we will check before
2025 * we will timeout and give up
2026 * @num_match_iterations: How many iterations should we have continued
2027 * status to account for status bits glitching.
2028 * This is to make sure that match occurs for
2029 * consecutive checks. This implies that the
2030 * worst case should consider that the stable
2031 * time should at the worst be num_wait_iterations
2032 * num_match_iterations to prevent timeout.
2033 * @delay_per_iteration_us: Specifies how long to wait (in micro seconds)
2034 * between each status checks. This is the minimum
2035 * duration, and overhead of register reads and
2036 * checks are on top of this and can vary based on
2037 * varied conditions.
2038 * @delay_before_iterations_us: Specifies how long to wait (in micro seconds)
2039 * before the very first check in the first
2040 * iteration of status check loop. This is the
2041 * minimum duration, and overhead of register
2042 * reads and checks are.
2043 * @status_flags_1_set_all_wait:If non-zero, Specifies that all bits of the
2044 * status matching this field requested MUST be 1.
2045 * @status_flags_1_set_any_wait:If non-zero, Specifies that at least one of the
2046 * bits matching this field requested MUST be 1.
2047 * @status_flags_1_clr_all_wait:If non-zero, Specifies that all bits of the
2048 * status matching this field requested MUST be 0.
2049 * @status_flags_1_clr_any_wait:If non-zero, Specifies that at least one of the
2050 * bits matching this field requested MUST be 0.
2051 *
2052 * Return: 0 if all goes well, else appropriate error message
2053 */
2054static int
2055ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle *handle,
2056 u8 proc_id,
2057 u8 num_wait_iterations,
2058 u8 num_match_iterations,
2059 u8 delay_per_iteration_us,
2060 u8 delay_before_iterations_us,
2061 u32 status_flags_1_set_all_wait,
2062 u32 status_flags_1_set_any_wait,
2063 u32 status_flags_1_clr_all_wait,
2064 u32 status_flags_1_clr_any_wait)
2065{
2066 struct ti_sci_msg_req_wait_proc_boot_status req;
2067 struct ti_sci_info *info;
2068 struct ti_sci_xfer *xfer;
2069 int ret = 0;
2070
2071 if (IS_ERR(handle))
2072 return PTR_ERR(handle);
2073 if (!handle)
2074 return -EINVAL;
2075
2076 info = handle_to_ti_sci_info(handle);
2077
2078 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_WAIT_PROC_BOOT_STATUS,
2079 TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
2080 (u32 *)&req, sizeof(req), 0);
2081 if (IS_ERR(xfer)) {
2082 ret = PTR_ERR(xfer);
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302083 return ret;
2084 }
2085 req.processor_id = proc_id;
2086 req.num_wait_iterations = num_wait_iterations;
2087 req.num_match_iterations = num_match_iterations;
2088 req.delay_per_iteration_us = delay_per_iteration_us;
2089 req.delay_before_iterations_us = delay_before_iterations_us;
2090 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
2091 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
2092 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
2093 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
2094
2095 ret = ti_sci_do_xfer(info, xfer);
2096 if (ret)
Andrew Davis771a16f2022-07-25 20:25:03 -05002097 return ret;
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302098
2099 return ret;
2100}
2101
2102/**
2103 * ti_sci_cmd_proc_shutdown_no_wait() - Command to shutdown a core without
2104 * requesting or waiting for a response. Note that this API call
2105 * should be followed by placing the respective processor into
2106 * either WFE or WFI mode.
2107 * @handle: Pointer to TI SCI handle
2108 * @proc_id: Processor ID this request is for
2109 *
2110 * Return: 0 if all went well, else returns appropriate error value.
2111 */
2112static int ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle *handle,
2113 u8 proc_id)
2114{
2115 int ret;
Sean Anderson405dc242020-09-15 10:44:38 -04002116 struct ti_sci_info *info;
2117
2118 if (IS_ERR(handle))
2119 return PTR_ERR(handle);
2120 if (!handle)
2121 return -EINVAL;
2122
2123 info = handle_to_ti_sci_info(handle);
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302124
2125 /*
2126 * Send the core boot status wait message waiting for either WFE or
2127 * WFI without requesting or waiting for a TISCI response with the
2128 * maximum wait time to give us the best chance to get to the WFE/WFI
2129 * command that should follow the invocation of this API before the
2130 * DMSC-internal processing of this command times out. Note that
2131 * waiting for the R5 WFE/WFI flags will also work on an ARMV8 type
2132 * core as the related flag bit positions are the same.
2133 */
2134 ret = ti_sci_proc_wait_boot_status_no_wait(handle, proc_id,
2135 U8_MAX, 100, U8_MAX, U8_MAX,
2136 0, PROC_BOOT_STATUS_FLAG_R5_WFE | PROC_BOOT_STATUS_FLAG_R5_WFI,
2137 0, 0);
2138 if (ret) {
2139 dev_err(info->dev, "Sending core %u wait message fail %d\n",
2140 proc_id, ret);
2141 return ret;
2142 }
2143
2144 /*
2145 * Release a processor managed by TISCI without requesting or waiting
2146 * for a response.
2147 */
2148 ret = ti_sci_set_device_state_no_wait(handle, proc_id, 0,
2149 MSG_DEVICE_SW_STATE_AUTO_OFF);
2150 if (ret)
2151 dev_err(info->dev, "Sending core %u shutdown message fail %d\n",
2152 proc_id, ret);
2153
2154 return ret;
2155}
2156
2157/**
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302158 * ti_sci_cmd_ring_config() - configure RA ring
2159 * @handle: pointer to TI SCI handle
2160 * @valid_params: Bitfield defining validity of ring configuration parameters.
2161 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2162 * @index: Ring index.
2163 * @addr_lo: The ring base address lo 32 bits
2164 * @addr_hi: The ring base address hi 32 bits
2165 * @count: Number of ring elements.
2166 * @mode: The mode of the ring
2167 * @size: The ring element size.
2168 * @order_id: Specifies the ring's bus order ID.
2169 *
2170 * Return: 0 if all went well, else returns appropriate error value.
2171 *
2172 * See @ti_sci_msg_rm_ring_cfg_req for more info.
2173 */
2174static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2175 u32 valid_params, u16 nav_id, u16 index,
2176 u32 addr_lo, u32 addr_hi, u32 count,
2177 u8 mode, u8 size, u8 order_id)
2178{
2179 struct ti_sci_msg_rm_ring_cfg_resp *resp;
2180 struct ti_sci_msg_rm_ring_cfg_req req;
2181 struct ti_sci_xfer *xfer;
2182 struct ti_sci_info *info;
2183 int ret = 0;
2184
2185 if (IS_ERR(handle))
2186 return PTR_ERR(handle);
2187 if (!handle)
2188 return -EINVAL;
2189
2190 info = handle_to_ti_sci_info(handle);
2191
2192 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2193 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2194 (u32 *)&req, sizeof(req), sizeof(*resp));
2195 if (IS_ERR(xfer)) {
2196 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302197 return ret;
2198 }
2199 req.valid_params = valid_params;
2200 req.nav_id = nav_id;
2201 req.index = index;
2202 req.addr_lo = addr_lo;
2203 req.addr_hi = addr_hi;
2204 req.count = count;
2205 req.mode = mode;
2206 req.size = size;
2207 req.order_id = order_id;
2208
2209 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002210 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302211 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302212
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302213fail:
2214 dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2215 return ret;
2216}
2217
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302218static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2219 u32 nav_id, u32 src_thread, u32 dst_thread)
2220{
2221 struct ti_sci_msg_hdr *resp;
2222 struct ti_sci_msg_psil_pair req;
2223 struct ti_sci_xfer *xfer;
2224 struct ti_sci_info *info;
2225 int ret = 0;
2226
2227 if (IS_ERR(handle))
2228 return PTR_ERR(handle);
2229 if (!handle)
2230 return -EINVAL;
2231
2232 info = handle_to_ti_sci_info(handle);
2233
2234 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2235 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2236 (u32 *)&req, sizeof(req), sizeof(*resp));
2237 if (IS_ERR(xfer)) {
2238 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302239 return ret;
2240 }
2241 req.nav_id = nav_id;
2242 req.src_thread = src_thread;
2243 req.dst_thread = dst_thread;
2244
2245 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002246 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302247 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302248
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302249fail:
2250 dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%u\n",
2251 nav_id, src_thread, dst_thread, ret);
2252 return ret;
2253}
2254
2255static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2256 u32 nav_id, u32 src_thread, u32 dst_thread)
2257{
2258 struct ti_sci_msg_hdr *resp;
2259 struct ti_sci_msg_psil_unpair req;
2260 struct ti_sci_xfer *xfer;
2261 struct ti_sci_info *info;
2262 int ret = 0;
2263
2264 if (IS_ERR(handle))
2265 return PTR_ERR(handle);
2266 if (!handle)
2267 return -EINVAL;
2268
2269 info = handle_to_ti_sci_info(handle);
2270
2271 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2272 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2273 (u32 *)&req, sizeof(req), sizeof(*resp));
2274 if (IS_ERR(xfer)) {
2275 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302276 return ret;
2277 }
2278 req.nav_id = nav_id;
2279 req.src_thread = src_thread;
2280 req.dst_thread = dst_thread;
2281
2282 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002283 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302284 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302285
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302286fail:
2287 dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%u\n",
2288 src_thread, dst_thread, ret);
2289 return ret;
2290}
2291
2292static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2293 const struct ti_sci_handle *handle,
2294 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2295{
2296 struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2297 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2298 struct ti_sci_xfer *xfer;
2299 struct ti_sci_info *info;
2300 int ret = 0;
2301
2302 if (IS_ERR(handle))
2303 return PTR_ERR(handle);
2304 if (!handle)
2305 return -EINVAL;
2306
2307 info = handle_to_ti_sci_info(handle);
2308
2309 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2310 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2311 (u32 *)&req, sizeof(req), sizeof(*resp));
2312 if (IS_ERR(xfer)) {
2313 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302314 return ret;
2315 }
2316 req.valid_params = params->valid_params;
2317 req.nav_id = params->nav_id;
2318 req.index = params->index;
2319 req.tx_pause_on_err = params->tx_pause_on_err;
2320 req.tx_filt_einfo = params->tx_filt_einfo;
2321 req.tx_filt_pswords = params->tx_filt_pswords;
2322 req.tx_atype = params->tx_atype;
2323 req.tx_chan_type = params->tx_chan_type;
2324 req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2325 req.tx_fetch_size = params->tx_fetch_size;
2326 req.tx_credit_count = params->tx_credit_count;
2327 req.txcq_qnum = params->txcq_qnum;
2328 req.tx_priority = params->tx_priority;
2329 req.tx_qos = params->tx_qos;
2330 req.tx_orderid = params->tx_orderid;
2331 req.fdepth = params->fdepth;
2332 req.tx_sched_priority = params->tx_sched_priority;
Vignesh Raghavendraa8a2b8a2021-05-10 20:06:02 +05302333 req.tx_burst_size = params->tx_burst_size;
2334 req.tx_tdtype = params->tx_tdtype;
2335 req.extended_ch_type = params->extended_ch_type;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302336
2337 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002338 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302339 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302340
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302341fail:
2342 dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2343 return ret;
2344}
2345
2346static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
2347 const struct ti_sci_handle *handle,
2348 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2349{
2350 struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
2351 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
2352 struct ti_sci_xfer *xfer;
2353 struct ti_sci_info *info;
2354 int ret = 0;
2355
2356 if (IS_ERR(handle))
2357 return PTR_ERR(handle);
2358 if (!handle)
2359 return -EINVAL;
2360
2361 info = handle_to_ti_sci_info(handle);
2362
2363 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2364 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2365 (u32 *)&req, sizeof(req), sizeof(*resp));
2366 if (IS_ERR(xfer)) {
2367 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302368 return ret;
2369 }
2370
2371 req.valid_params = params->valid_params;
2372 req.nav_id = params->nav_id;
2373 req.index = params->index;
2374 req.rx_fetch_size = params->rx_fetch_size;
2375 req.rxcq_qnum = params->rxcq_qnum;
2376 req.rx_priority = params->rx_priority;
2377 req.rx_qos = params->rx_qos;
2378 req.rx_orderid = params->rx_orderid;
2379 req.rx_sched_priority = params->rx_sched_priority;
2380 req.flowid_start = params->flowid_start;
2381 req.flowid_cnt = params->flowid_cnt;
2382 req.rx_pause_on_err = params->rx_pause_on_err;
2383 req.rx_atype = params->rx_atype;
2384 req.rx_chan_type = params->rx_chan_type;
2385 req.rx_ignore_short = params->rx_ignore_short;
2386 req.rx_ignore_long = params->rx_ignore_long;
2387
2388 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002389 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302390 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302391
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302392fail:
2393 dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2394 return ret;
2395}
2396
2397static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2398 const struct ti_sci_handle *handle,
2399 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2400{
2401 struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2402 struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2403 struct ti_sci_xfer *xfer;
2404 struct ti_sci_info *info;
2405 int ret = 0;
2406
2407 if (IS_ERR(handle))
2408 return PTR_ERR(handle);
2409 if (!handle)
2410 return -EINVAL;
2411
2412 info = handle_to_ti_sci_info(handle);
2413
2414 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2415 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2416 (u32 *)&req, sizeof(req), sizeof(*resp));
2417 if (IS_ERR(xfer)) {
2418 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302419 return ret;
2420 }
2421
2422 req.valid_params = params->valid_params;
2423 req.nav_id = params->nav_id;
2424 req.flow_index = params->flow_index;
2425 req.rx_einfo_present = params->rx_einfo_present;
2426 req.rx_psinfo_present = params->rx_psinfo_present;
2427 req.rx_error_handling = params->rx_error_handling;
2428 req.rx_desc_type = params->rx_desc_type;
2429 req.rx_sop_offset = params->rx_sop_offset;
2430 req.rx_dest_qnum = params->rx_dest_qnum;
2431 req.rx_src_tag_hi = params->rx_src_tag_hi;
2432 req.rx_src_tag_lo = params->rx_src_tag_lo;
2433 req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2434 req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2435 req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2436 req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2437 req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2438 req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2439 req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2440 req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2441 req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2442 req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2443 req.rx_ps_location = params->rx_ps_location;
2444
2445 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002446 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302447 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302448
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302449fail:
2450 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302451 return ret;
2452}
2453
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002454/**
2455 * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
2456 * @handle: pointer to TI SCI handle
2457 * @region: region configuration parameters
2458 *
2459 * Return: 0 if all went well, else returns appropriate error value.
2460 */
2461static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
2462 const struct ti_sci_msg_fwl_region *region)
2463{
2464 struct ti_sci_msg_fwl_set_firewall_region_req req;
2465 struct ti_sci_msg_hdr *resp;
2466 struct ti_sci_info *info;
2467 struct ti_sci_xfer *xfer;
2468 int ret = 0;
2469
2470 if (IS_ERR(handle))
2471 return PTR_ERR(handle);
2472 if (!handle)
2473 return -EINVAL;
2474
2475 info = handle_to_ti_sci_info(handle);
2476
2477 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
2478 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2479 (u32 *)&req, sizeof(req), sizeof(*resp));
2480 if (IS_ERR(xfer)) {
2481 ret = PTR_ERR(xfer);
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002482 return ret;
2483 }
2484
2485 req.fwl_id = region->fwl_id;
2486 req.region = region->region;
2487 req.n_permission_regs = region->n_permission_regs;
2488 req.control = region->control;
2489 req.permissions[0] = region->permissions[0];
2490 req.permissions[1] = region->permissions[1];
2491 req.permissions[2] = region->permissions[2];
2492 req.start_address = region->start_address;
2493 req.end_address = region->end_address;
2494
2495 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002496 if (ret)
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002497 return ret;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002498
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002499 return 0;
2500}
2501
2502/**
2503 * ti_sci_cmd_get_fwl_region() - Request for getting a firewall region
2504 * @handle: pointer to TI SCI handle
2505 * @region: region configuration parameters
2506 *
2507 * Return: 0 if all went well, else returns appropriate error value.
2508 */
2509static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
2510 struct ti_sci_msg_fwl_region *region)
2511{
2512 struct ti_sci_msg_fwl_get_firewall_region_req req;
2513 struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
2514 struct ti_sci_info *info;
2515 struct ti_sci_xfer *xfer;
2516 int ret = 0;
2517
2518 if (IS_ERR(handle))
2519 return PTR_ERR(handle);
2520 if (!handle)
2521 return -EINVAL;
2522
2523 info = handle_to_ti_sci_info(handle);
2524
2525 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
2526 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2527 (u32 *)&req, sizeof(req), sizeof(*resp));
2528 if (IS_ERR(xfer)) {
2529 ret = PTR_ERR(xfer);
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002530 return ret;
2531 }
2532
2533 req.fwl_id = region->fwl_id;
2534 req.region = region->region;
2535 req.n_permission_regs = region->n_permission_regs;
2536
2537 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002538 if (ret)
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002539 return ret;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002540
2541 resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;
2542
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002543 region->fwl_id = resp->fwl_id;
2544 region->region = resp->region;
2545 region->n_permission_regs = resp->n_permission_regs;
2546 region->control = resp->control;
2547 region->permissions[0] = resp->permissions[0];
2548 region->permissions[1] = resp->permissions[1];
2549 region->permissions[2] = resp->permissions[2];
2550 region->start_address = resp->start_address;
2551 region->end_address = resp->end_address;
2552
2553 return 0;
2554}
2555
2556/**
2557 * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
2558 * @handle: pointer to TI SCI handle
2559 * @region: region configuration parameters
2560 *
2561 * Return: 0 if all went well, else returns appropriate error value.
2562 */
2563static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
2564 struct ti_sci_msg_fwl_owner *owner)
2565{
2566 struct ti_sci_msg_fwl_change_owner_info_req req;
2567 struct ti_sci_msg_fwl_change_owner_info_resp *resp;
2568 struct ti_sci_info *info;
2569 struct ti_sci_xfer *xfer;
2570 int ret = 0;
2571
2572 if (IS_ERR(handle))
2573 return PTR_ERR(handle);
2574 if (!handle)
2575 return -EINVAL;
2576
2577 info = handle_to_ti_sci_info(handle);
2578
Andrew F. Davis8928fbd2019-04-29 09:04:11 -04002579 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
2580 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002581 (u32 *)&req, sizeof(req), sizeof(*resp));
2582 if (IS_ERR(xfer)) {
2583 ret = PTR_ERR(xfer);
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002584 return ret;
2585 }
2586
2587 req.fwl_id = owner->fwl_id;
2588 req.region = owner->region;
2589 req.owner_index = owner->owner_index;
2590
2591 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002592 if (ret)
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002593 return ret;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002594
2595 resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;
2596
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002597 owner->fwl_id = resp->fwl_id;
2598 owner->region = resp->region;
2599 owner->owner_index = resp->owner_index;
2600 owner->owner_privid = resp->owner_privid;
2601 owner->owner_permission_bits = resp->owner_permission_bits;
2602
2603 return ret;
2604}
2605
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302606/*
2607 * ti_sci_setup_ops() - Setup the operations structures
2608 * @info: pointer to TISCI pointer
2609 */
2610static void ti_sci_setup_ops(struct ti_sci_info *info)
2611{
2612 struct ti_sci_ops *ops = &info->handle.ops;
2613 struct ti_sci_board_ops *bops = &ops->board_ops;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302614 struct ti_sci_dev_ops *dops = &ops->dev_ops;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05302615 struct ti_sci_clk_ops *cops = &ops->clk_ops;
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05302616 struct ti_sci_core_ops *core_ops = &ops->core_ops;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302617 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302618 struct ti_sci_proc_ops *pops = &ops->proc_ops;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302619 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2620 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2621 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002622 struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302623
2624 bops->board_config = ti_sci_cmd_set_board_config;
2625 bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
2626 bops->board_config_security = ti_sci_cmd_set_board_config_security;
2627 bops->board_config_pm = ti_sci_cmd_set_board_config_pm;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302628
2629 dops->get_device = ti_sci_cmd_get_device;
Lokesh Vutlaf5613002019-06-07 19:24:39 +05302630 dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302631 dops->idle_device = ti_sci_cmd_idle_device;
Lokesh Vutlaf5613002019-06-07 19:24:39 +05302632 dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302633 dops->put_device = ti_sci_cmd_put_device;
2634 dops->is_valid = ti_sci_cmd_dev_is_valid;
2635 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2636 dops->is_idle = ti_sci_cmd_dev_is_idle;
2637 dops->is_stop = ti_sci_cmd_dev_is_stop;
2638 dops->is_on = ti_sci_cmd_dev_is_on;
2639 dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2640 dops->set_device_resets = ti_sci_cmd_set_device_resets;
2641 dops->get_device_resets = ti_sci_cmd_get_device_resets;
Lokesh Vutla0d0412a2019-06-07 19:24:41 +05302642 dops->release_exclusive_devices = ti_sci_cmd_release_exclusive_devices;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05302643
2644 cops->get_clock = ti_sci_cmd_get_clock;
2645 cops->idle_clock = ti_sci_cmd_idle_clock;
2646 cops->put_clock = ti_sci_cmd_put_clock;
2647 cops->is_auto = ti_sci_cmd_clk_is_auto;
2648 cops->is_on = ti_sci_cmd_clk_is_on;
2649 cops->is_off = ti_sci_cmd_clk_is_off;
2650
2651 cops->set_parent = ti_sci_cmd_clk_set_parent;
2652 cops->get_parent = ti_sci_cmd_clk_get_parent;
2653 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2654
2655 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2656 cops->set_freq = ti_sci_cmd_clk_set_freq;
2657 cops->get_freq = ti_sci_cmd_clk_get_freq;
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05302658
2659 core_ops->reboot_device = ti_sci_cmd_core_reboot;
Lokesh Vutla032dce82019-03-08 11:47:32 +05302660 core_ops->query_msmc = ti_sci_cmd_query_msmc;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302661
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302662 rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2663 rm_core_ops->get_range_from_shost =
2664 ti_sci_cmd_get_resource_range_from_shost;
2665
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302666 pops->proc_request = ti_sci_cmd_proc_request;
2667 pops->proc_release = ti_sci_cmd_proc_release;
2668 pops->proc_handover = ti_sci_cmd_proc_handover;
2669 pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
2670 pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
2671 pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
2672 pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302673 pops->proc_shutdown_no_wait = ti_sci_cmd_proc_shutdown_no_wait;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302674
2675 rops->config = ti_sci_cmd_ring_config;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302676
2677 psilops->pair = ti_sci_cmd_rm_psil_pair;
2678 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2679
2680 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2681 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2682 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002683
2684 fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
2685 fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
2686 fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302687}
2688
2689/**
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302690 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
2691 * @dev: Pointer to the SYSFW device
2692 *
2693 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2694 * are encountered.
2695 */
2696const
2697struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2698{
Neha Malcom Francisc7dedd02023-09-27 18:39:53 +05302699 int ret;
2700
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302701 if (!sci_dev)
2702 return ERR_PTR(-EINVAL);
2703
2704 struct ti_sci_info *info = dev_get_priv(sci_dev);
2705
2706 if (!info)
2707 return ERR_PTR(-EINVAL);
2708
2709 struct ti_sci_handle *handle = &info->handle;
2710
2711 if (!handle)
2712 return ERR_PTR(-EINVAL);
2713
Neha Malcom Francisc7dedd02023-09-27 18:39:53 +05302714 ret = ti_sci_cmd_get_revision(handle);
2715
2716 if (ret)
2717 return ERR_PTR(-EINVAL);
2718
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302719 return handle;
2720}
2721
2722/**
2723 * ti_sci_get_handle() - Get the TI SCI handle for a device
2724 * @dev: Pointer to device for which we want SCI handle
2725 *
2726 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2727 * are encountered.
2728 */
2729const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2730{
2731 if (!dev)
2732 return ERR_PTR(-EINVAL);
2733
2734 struct udevice *sci_dev = dev_get_parent(dev);
2735
2736 return ti_sci_get_handle_from_sysfw(sci_dev);
2737}
2738
2739/**
2740 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2741 * @dev: device node
2742 * @propname: property name containing phandle on TISCI node
2743 *
2744 * Return: pointer to handle if successful, else appropriate error value.
2745 */
2746const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2747 const char *property)
2748{
2749 struct ti_sci_info *entry, *info = NULL;
2750 u32 phandle, err;
2751 ofnode node;
2752
2753 err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2754 if (err)
2755 return ERR_PTR(err);
2756
2757 node = ofnode_get_by_phandle(phandle);
2758 if (!ofnode_valid(node))
2759 return ERR_PTR(-EINVAL);
2760
2761 list_for_each_entry(entry, &ti_sci_list, list)
2762 if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2763 info = entry;
2764 break;
2765 }
2766
2767 if (!info)
2768 return ERR_PTR(-ENODEV);
2769
2770 return &info->handle;
2771}
2772
2773/**
2774 * ti_sci_of_to_info() - generate private data from device tree
2775 * @dev: corresponding system controller interface device
2776 * @info: pointer to driver specific private data
2777 *
2778 * Return: 0 if all goes good, else appropriate error message.
2779 */
2780static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
2781{
2782 int ret;
2783
2784 ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
2785 if (ret) {
2786 dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
2787 __func__, ret);
2788 return ret;
2789 }
2790
2791 ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
2792 if (ret) {
2793 dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
2794 __func__, ret);
2795 return ret;
2796 }
2797
2798 /* Notify channel is optional. Enable only if populated */
2799 ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
2800 if (ret) {
2801 dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
2802 __func__, ret);
2803 }
2804
2805 info->host_id = dev_read_u32_default(dev, "ti,host-id",
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302806 info->desc->default_host_id);
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302807
2808 info->is_secure = dev_read_bool(dev, "ti,secure-host");
2809
2810 return 0;
2811}
2812
2813/**
2814 * ti_sci_probe() - Basic probe
2815 * @dev: corresponding system controller interface device
2816 *
2817 * Return: 0 if all goes good, else appropriate error message.
2818 */
2819static int ti_sci_probe(struct udevice *dev)
2820{
2821 struct ti_sci_info *info;
2822 int ret;
2823
2824 debug("%s(dev=%p)\n", __func__, dev);
2825
2826 info = dev_get_priv(dev);
2827 info->desc = (void *)dev_get_driver_data(dev);
2828
2829 ret = ti_sci_of_to_info(dev, info);
2830 if (ret) {
2831 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2832 return ret;
2833 }
2834
2835 info->dev = dev;
2836 info->seq = 0xA;
2837
2838 list_add_tail(&info->list, &ti_sci_list);
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302839 ti_sci_setup_ops(info);
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302840
Lokesh Vutla0d0412a2019-06-07 19:24:41 +05302841 INIT_LIST_HEAD(&info->dev_list);
2842
Andrew Davis1ed20d62024-04-02 11:09:07 -05002843 if (IS_ENABLED(CONFIG_SYSRESET_TI_SCI)) {
2844 ret = device_bind_driver(dev, "ti-sci-sysreset", "sysreset", NULL);
2845 if (ret)
2846 dev_warn(dev, "cannot bind SYSRESET (ret = %d)\n", ret);
2847 }
2848
Neha Malcom Francisc7dedd02023-09-27 18:39:53 +05302849 return 0;
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302850}
2851
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05302852/**
2853 * ti_sci_dm_probe() - Basic probe for DM to TIFS SCI
2854 * @dev: corresponding system controller interface device
2855 *
2856 * Return: 0 if all goes good, else appropriate error message.
2857 */
2858static __maybe_unused int ti_sci_dm_probe(struct udevice *dev)
2859{
2860 struct ti_sci_rm_core_ops *rm_core_ops;
2861 struct ti_sci_rm_udmap_ops *udmap_ops;
2862 struct ti_sci_rm_ringacc_ops *rops;
2863 struct ti_sci_rm_psil_ops *psilops;
2864 struct ti_sci_ops *ops;
2865 struct ti_sci_info *info;
2866 int ret;
2867
2868 debug("%s(dev=%p)\n", __func__, dev);
2869
2870 info = dev_get_priv(dev);
2871 info->desc = (void *)dev_get_driver_data(dev);
2872
2873 ret = ti_sci_of_to_info(dev, info);
2874 if (ret) {
2875 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2876 return ret;
2877 }
2878
2879 info->dev = dev;
2880 info->seq = 0xA;
2881
2882 list_add_tail(&info->list, &ti_sci_list);
2883
2884 ops = &info->handle.ops;
2885
2886 rm_core_ops = &ops->rm_core_ops;
2887 rm_core_ops->get_range = ti_sci_cmd_get_resource_range_static;
2888
2889 rops = &ops->rm_ring_ops;
2890 rops->config = ti_sci_cmd_ring_config;
2891
2892 psilops = &ops->rm_psil_ops;
2893 psilops->pair = ti_sci_cmd_rm_psil_pair;
2894 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2895
2896 udmap_ops = &ops->rm_udmap_ops;
2897 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2898 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2899 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2900
2901 return ret;
2902}
2903
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302904/*
2905 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
2906 * @res: Pointer to the TISCI resource
2907 *
2908 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
2909 */
2910u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
2911{
2912 u16 set, free_bit;
2913
2914 for (set = 0; set < res->sets; set++) {
2915 free_bit = find_first_zero_bit(res->desc[set].res_map,
2916 res->desc[set].num);
2917 if (free_bit != res->desc[set].num) {
2918 set_bit(free_bit, res->desc[set].res_map);
2919 return res->desc[set].start + free_bit;
2920 }
2921 }
2922
2923 return TI_SCI_RESOURCE_NULL;
2924}
2925
2926/**
2927 * ti_sci_release_resource() - Release a resource from TISCI resource.
2928 * @res: Pointer to the TISCI resource
2929 */
2930void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
2931{
2932 u16 set;
2933
2934 for (set = 0; set < res->sets; set++) {
2935 if (res->desc[set].start <= id &&
2936 (res->desc[set].num + res->desc[set].start) > id)
2937 clear_bit(id - res->desc[set].start,
2938 res->desc[set].res_map);
2939 }
2940}
2941
2942/**
2943 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
2944 * @handle: TISCI handle
2945 * @dev: Device pointer to which the resource is assigned
2946 * @of_prop: property name by which the resource are represented
2947 *
2948 * Note: This function expects of_prop to be in the form of tuples
2949 * <type, subtype>. Allocates and initializes ti_sci_resource structure
2950 * for each of_prop. Client driver can directly call
2951 * ti_sci_(get_free, release)_resource apis for handling the resource.
2952 *
2953 * Return: Pointer to ti_sci_resource if all went well else appropriate
2954 * error pointer.
2955 */
2956struct ti_sci_resource *
2957devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2958 struct udevice *dev, u32 dev_id, char *of_prop)
2959{
2960 u32 resource_subtype;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302961 struct ti_sci_resource *res;
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05002962 bool valid_set = false;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302963 int sets, i, ret;
2964 u32 *temp;
2965
2966 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
2967 if (!res)
2968 return ERR_PTR(-ENOMEM);
2969
2970 sets = dev_read_size(dev, of_prop);
2971 if (sets < 0) {
2972 dev_err(dev, "%s resource type ids not available\n", of_prop);
2973 return ERR_PTR(sets);
2974 }
2975 temp = malloc(sets);
2976 sets /= sizeof(u32);
2977 res->sets = sets;
2978
2979 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
2980 GFP_KERNEL);
2981 if (!res->desc)
2982 return ERR_PTR(-ENOMEM);
2983
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302984 ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
2985 if (ret)
2986 return ERR_PTR(-EINVAL);
2987
2988 for (i = 0; i < res->sets; i++) {
2989 resource_subtype = temp[i];
2990 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
2991 resource_subtype,
2992 &res->desc[i].start,
2993 &res->desc[i].num);
2994 if (ret) {
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05002995 dev_dbg(dev, "type %d subtype %d not allocated for host %d\n",
Lokesh Vutla0acf1dc2020-08-17 11:00:48 +05302996 dev_id, resource_subtype,
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302997 handle_to_ti_sci_info(handle)->host_id);
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05002998 res->desc[i].start = 0;
2999 res->desc[i].num = 0;
3000 continue;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303001 }
3002
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05003003 valid_set = true;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303004 dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
Lokesh Vutla0acf1dc2020-08-17 11:00:48 +05303005 dev_id, resource_subtype, res->desc[i].start,
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303006 res->desc[i].num);
3007
3008 res->desc[i].res_map =
3009 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
3010 sizeof(*res->desc[i].res_map), GFP_KERNEL);
3011 if (!res->desc[i].res_map)
3012 return ERR_PTR(-ENOMEM);
3013 }
3014
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05003015 if (valid_set)
3016 return res;
3017
3018 return ERR_PTR(-EINVAL);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303019}
3020
/* Description for K2G */
static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
	/* TISCI host ID used when the DT node carries no "ti,host-id" */
	.default_host_id = 2,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
	.max_msgs = 20,
	/* Maximum message size in bytes */
	.max_msg_size = 64,
};
3030
/* Description for AM654 */
static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
	/* TISCI host ID used when the DT node carries no "ti,host-id" */
	.default_host_id = 12,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN, not by the firmware itself */
	.max_msgs = 20,
	/* Maximum message size in bytes */
	.max_msg_size = 60,
};
3040
/* Description for J721e DM to DMSC communication */
static const struct ti_sci_desc ti_sci_dm_j721e_desc = {
	/* TISCI host ID used when the DT node carries no "ti,host-id" */
	.default_host_id = 3,
	/* Conservative duration */
	.max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN */
	.max_msgs = 20,
	/* Maximum message size in bytes */
	.max_msg_size = 60,
};
3048
/* Devicetree compatibles served by the generic TISCI driver below */
static const struct udevice_id ti_sci_ids[] = {
	{
		.compatible = "ti,k2g-sci",
		.data = (ulong)&ti_sci_pmmc_k2g_desc
	},
	{
		.compatible = "ti,am654-sci",
		.data = (ulong)&ti_sci_pmmc_am654_desc
	},
	{ /* Sentinel */ },
};
3060
/* Devicetree compatibles served by the DM-to-TIFS driver (ti_sci_dm) */
static __maybe_unused const struct udevice_id ti_sci_dm_ids[] = {
	{
		.compatible = "ti,j721e-dm-sci",
		.data = (ulong)&ti_sci_dm_j721e_desc
	},
	{ /* Sentinel */ },
};
3068
/* Generic TISCI firmware driver; per-SoC parameters come via driver data */
U_BOOT_DRIVER(ti_sci) = {
	.name = "ti_sci",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_ids,
	.probe = ti_sci_probe,
	.priv_auto = sizeof(struct ti_sci_info),
};
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05303076
#if IS_ENABLED(CONFIG_K3_DM_FW)
/*
 * DM-to-TIFS variant of the driver; only compiled in when CONFIG_K3_DM_FW
 * is enabled. See ti_sci_dm_probe() for how its ops differ.
 */
U_BOOT_DRIVER(ti_sci_dm) = {
	.name = "ti_sci_dm",
	.id = UCLASS_FIRMWARE,
	.of_match = ti_sci_dm_ids,
	.probe = ti_sci_dm_probe,
	.priv_auto = sizeof(struct ti_sci_info),
};
#endif