// SPDX-License-Identifier: GPL-2.0+
/*
 * Texas Instruments System Control Interface Protocol Driver
 * Based on drivers/firmware/ti_sci.c from Linux.
 *
 * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <mailbox.h>
#include <malloc.h>
#include <dm/device.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/err.h>
#include <linux/soc/ti/k3-sec-proxy.h>
#include <linux/soc/ti/ti_sci_protocol.h>

#include "ti_sci.h"
#include "ti_sci_static_data.h"

/* List of all TI SCI devices active in system */
static LIST_HEAD(ti_sci_list);

/**
 * struct ti_sci_xfer - Structure representing a message flow
 * @tx_message:	Transmit message
 * @rx_len:	Receive message length
 */
struct ti_sci_xfer {
	struct k3_sec_proxy_msg tx_message;
	u8 rx_len;
};

/**
 * struct ti_sci_rm_type_map - Structure representing TISCI Resource
 *				management representation of dev_ids.
 * @dev_id:	TISCI device ID
 * @type:	Corresponding id as identified by TISCI RM.
 *
 * Note: This is used only as a workaround for using RM range APIs
 * for AM654 SoC. For future SoCs dev_id will be used as type
 * for RM range APIs. In order to maintain ABI backward compatibility,
 * type is not being changed for AM654 SoC.
 */
struct ti_sci_rm_type_map {
	u32 dev_id;
	u16 type;
};

/**
 * struct ti_sci_desc - Description of SoC integration
 * @default_host_id:	Host identifier representing the compute entity
 * @max_rx_timeout_ms:	Timeout for communication with SoC (in Milliseconds)
 * @max_msgs:		Maximum number of messages that can be pending
 *			simultaneously in the system
 * @max_msg_size:	Maximum size of data per message that can be handled.
 */
struct ti_sci_desc {
	u8 default_host_id;
	int max_rx_timeout_ms;
	int max_msgs;
	int max_msg_size;
};

/**
 * struct ti_sci_info - Structure representing a TI SCI instance
 * @dev:	Device pointer
 * @desc:	SoC description for this instance
 * @handle:	Instance of TI SCI handle to send to clients.
 * @chan_tx:	Transmit mailbox channel
 * @chan_rx:	Receive mailbox channel
 * @chan_notify: Notification mailbox channel
 * @xfer:	xfer info
 * @list:	list head
 * @dev_list:	List of exclusively requested devices
 * @is_secure:	Determines if the communication is through secure threads.
 * @host_id:	Host identifier representing the compute entity
 * @seq:	Sequence ID used to verify that tx and rx messages match.
 */
struct ti_sci_info {
	struct udevice *dev;
	const struct ti_sci_desc *desc;
	struct ti_sci_handle handle;
	struct mbox_chan chan_tx;
	struct mbox_chan chan_rx;
	struct mbox_chan chan_notify;
	struct ti_sci_xfer xfer;
	struct list_head list;
	struct list_head dev_list;
	bool is_secure;
	u8 host_id;
	u8 seq;
};

struct ti_sci_exclusive_dev {
	u32 id;
	u32 count;
	struct list_head list;
};

#define handle_to_ti_sci_info(h) container_of(h, struct ti_sci_info, handle)

/**
 * ti_sci_setup_one_xfer() - Setup one message type
 * @info:	Pointer to SCI entity information
 * @msg_type:	Message type
 * @msg_flags:	Flag to set for the message
 * @buf:	Buffer to be sent to mailbox channel
 * @tx_message_size: transmit message size
 * @rx_message_size: receive message size. May be set to zero for send-only
 *		     transactions.
 *
 * Helper function which is used by various command functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * Return: Corresponding ti_sci_xfer pointer if all went fine,
 *	   else appropriate error pointer.
 */
static struct ti_sci_xfer *ti_sci_setup_one_xfer(struct ti_sci_info *info,
						 u16 msg_type, u32 msg_flags,
						 u32 *buf,
						 size_t tx_message_size,
						 size_t rx_message_size)
{
	struct ti_sci_xfer *xfer = &info->xfer;
	struct ti_sci_msg_hdr *hdr;

	/* Ensure we have sane transfer sizes */
	if (rx_message_size > info->desc->max_msg_size ||
	    tx_message_size > info->desc->max_msg_size ||
	    (rx_message_size > 0 && rx_message_size < sizeof(*hdr)) ||
	    tx_message_size < sizeof(*hdr)) {
		dev_err(info->dev, "TI-SCI message transfer size not sane\n");
		return ERR_PTR(-ERANGE);
	}

	info->seq = ~info->seq;
	xfer->tx_message.buf = buf;
	xfer->tx_message.len = tx_message_size;
	xfer->rx_len = (u8)rx_message_size;

	hdr = (struct ti_sci_msg_hdr *)buf;
	hdr->seq = info->seq;
	hdr->type = msg_type;
	hdr->host = info->host_id;
	hdr->flags = msg_flags;

	return xfer;
}

/**
 * ti_sci_get_response() - Receive response from mailbox channel
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 * @chan:	Channel to receive the response
 *
 * Return: -ETIMEDOUT if no response is received, the corresponding error
 *	   code if the transfer fails, else 0 on success.
 */
static int ti_sci_get_response(struct ti_sci_info *info,
			       struct ti_sci_xfer *xfer,
			       struct mbox_chan *chan)
{
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	struct ti_sci_secure_msg_hdr *secure_hdr;
	struct ti_sci_msg_hdr *hdr;
	int ret;

	/* Receive the response */
	ret = mbox_recv(chan, msg, info->desc->max_rx_timeout_ms * 1000);
	if (ret) {
		dev_err(info->dev, "%s: Message receive failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* ToDo: Verify checksum */
	if (info->is_secure) {
		secure_hdr = (struct ti_sci_secure_msg_hdr *)msg->buf;
		msg->buf = (u32 *)((void *)msg->buf + sizeof(*secure_hdr));
	}

	/* msg is updated by mailbox driver */
	hdr = (struct ti_sci_msg_hdr *)msg->buf;

	/* Sanity check for message response */
	if (hdr->seq != info->seq) {
		dev_dbg(info->dev, "%s: Message for %d is not expected\n",
			__func__, hdr->seq);
		return ret;
	}

	if (msg->len > info->desc->max_msg_size) {
		dev_err(info->dev, "%s: Unable to handle %zu xfer (max %d)\n",
			__func__, msg->len, info->desc->max_msg_size);
		return -EINVAL;
	}

	if (msg->len < xfer->rx_len) {
		dev_err(info->dev, "%s: Recv xfer %zu < expected %d length\n",
			__func__, msg->len, xfer->rx_len);
	}

	return ret;
}

/**
 * ti_sci_is_response_ack() - Generic ACK/NACK message checkup
 * @r:	pointer to response buffer
 *
 * Return: true if the response was an ACK, else returns false.
 */
static bool ti_sci_is_response_ack(void *r)
{
	struct ti_sci_msg_hdr *hdr = r;

	return hdr->flags & TI_SCI_FLAG_RESP_GENERIC_ACK ? true : false;
}

/**
 * ti_sci_do_xfer() - Do one transfer
 * @info:	Pointer to SCI entity information
 * @xfer:	Transfer to initiate and wait for response
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_do_xfer(struct ti_sci_info *info,
			  struct ti_sci_xfer *xfer)
{
	struct k3_sec_proxy_msg *msg = &xfer->tx_message;
	u8 secure_buf[info->desc->max_msg_size];
	struct ti_sci_secure_msg_hdr secure_hdr;
	int ret;

	if (info->is_secure) {
		/* ToDo: get checksum of the entire message */
		secure_hdr.checksum = 0;
		secure_hdr.reserved = 0;
		memcpy(&secure_buf[sizeof(secure_hdr)], xfer->tx_message.buf,
		       xfer->tx_message.len);

		xfer->tx_message.buf = (u32 *)secure_buf;
		xfer->tx_message.len += sizeof(secure_hdr);

		if (xfer->rx_len)
			xfer->rx_len += sizeof(secure_hdr);
	}

	/* Send the message */
	ret = mbox_send(&info->chan_tx, msg);
	if (ret) {
		dev_err(info->dev, "%s: Message sending failed. ret = %d\n",
			__func__, ret);
		return ret;
	}

	/* Get response if requested */
	if (xfer->rx_len) {
		ret = ti_sci_get_response(info, xfer, &info->chan_rx);
		if (!ti_sci_is_response_ack(xfer->tx_message.buf)) {
			dev_err(info->dev, "Message not acknowledged\n");
			ret = -ENODEV;
		}
	}

	return ret;
}
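
/*
 * Illustrative sketch (not part of the upstream driver): the calling pattern
 * every command below follows - build the request with
 * ti_sci_setup_one_xfer(), send it with ti_sci_do_xfer(), then read the
 * response back out of xfer->tx_message.buf (the same buffer is reused for
 * the reply). TI_SCI_MSG_VERSION is used purely as an example message type;
 * the helper name is hypothetical.
 */
static int __maybe_unused example_send_header_only(struct ti_sci_info *info)
{
	struct ti_sci_msg_hdr req, *resp;
	struct ti_sci_xfer *xfer;
	int ret;

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer))
		return PTR_ERR(xfer);

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	/*
	 * ti_sci_do_xfer() already rejected NACKed replies; the cast below
	 * only shows where the response lands.
	 */
	resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;

	return ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
}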

/**
 * ti_sci_cmd_get_revision() - command to get the revision of the SCI entity
 * @handle:	pointer to TI SCI handle
 *
 * Updates the SCI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_revision(struct ti_sci_handle *handle)
{
	struct ti_sci_msg_resp_version *rev_info;
	struct ti_sci_version_info *ver;
	struct ti_sci_msg_hdr hdr;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_VERSION,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&hdr, sizeof(struct ti_sci_msg_hdr),
				     sizeof(*rev_info));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	rev_info = (struct ti_sci_msg_resp_version *)xfer->tx_message.buf;

	ver = &handle->version;
	ver->abi_major = rev_info->abi_major;
	ver->abi_minor = rev_info->abi_minor;
	ver->firmware_revision = rev_info->firmware_revision;
	strncpy(ver->firmware_description, rev_info->firmware_description,
		sizeof(ver->firmware_description));

	return 0;
}
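
/*
 * Illustrative sketch (not part of the upstream driver): how the revision
 * information populated by ti_sci_cmd_get_revision() can be consumed from
 * the handle. The helper name is hypothetical and only symbols already
 * defined in this file are used.
 */
static void __maybe_unused example_print_revision(struct ti_sci_handle *handle)
{
	if (ti_sci_cmd_get_revision(handle))
		return;

	/* handle->version is filled in by the command above */
	debug("TI-SCI ABI %u.%u, firmware rev 0x%x '%s'\n",
	      handle->version.abi_major, handle->version.abi_minor,
	      handle->version.firmware_revision,
	      handle->version.firmware_description);
}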

/**
 * cmd_set_board_config_using_msg() - Common command to send board configuration
 *                                    message
 * @handle:	pointer to TI SCI handle
 * @msg_type:	One of the TISCI message types to set board configuration
 * @addr:	Address where the board config structure is located
 * @size:	Size of the board config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int cmd_set_board_config_using_msg(const struct ti_sci_handle *handle,
					  u16 msg_type, u64 addr, u32 size)
{
	struct ti_sci_msg_board_config req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, msg_type,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.boardcfgp_high = (addr >> 32) & 0xffffffff;
	req.boardcfgp_low = addr & 0xffffffff;
	req.boardcfg_size = size;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_cmd_set_board_config() - Command to send board configuration message
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board config structure is located
 * @size:	Size of the board config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_set_board_config(const struct ti_sci_handle *handle,
				       u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG,
					      addr, size);
}

/**
 * ti_sci_cmd_set_board_config_rm() - Command to send board resource
 *				      management configuration
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board RM config structure is located
 * @size:	Size of the RM config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static
int ti_sci_cmd_set_board_config_rm(const struct ti_sci_handle *handle,
				   u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG_RM,
					      addr, size);
}

/**
 * ti_sci_cmd_set_board_config_security() - Command to send board security
 *					    configuration message
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board security config structure is located
 * @size:	Size of the security config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static
int ti_sci_cmd_set_board_config_security(const struct ti_sci_handle *handle,
					 u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG_SECURITY,
					      addr, size);
}

/**
 * ti_sci_cmd_set_board_config_pm() - Command to send board power and clock
 *				      configuration message
 * @handle:	pointer to TI SCI handle
 * @addr:	Address where the board PM config structure is located
 * @size:	Size of the PM config structure
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_set_board_config_pm(const struct ti_sci_handle *handle,
					  u64 addr, u32 size)
{
	return cmd_set_board_config_using_msg(handle,
					      TI_SCI_MSG_BOARD_CONFIG_PM,
					      addr, size);
}
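
/*
 * Illustrative sketch (not part of the upstream driver): pointing the
 * firmware at a board configuration blob already present in memory. The
 * 64-bit address is split into the boardcfgp_high/boardcfgp_low fields by
 * cmd_set_board_config_using_msg() above. The address, size and helper name
 * are hypothetical.
 */
static int __maybe_unused example_load_boardcfg(const struct ti_sci_handle *handle)
{
	u64 cfg_addr = 0x41c80000ULL;	/* hypothetical config blob address */
	u32 cfg_size = 0x800;		/* hypothetical config blob size */

	return ti_sci_cmd_set_board_config(handle, cfg_addr, cfg_size);
}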

static struct ti_sci_exclusive_dev
*ti_sci_get_exclusive_dev(struct list_head *dev_list, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	list_for_each_entry(dev, dev_list, list)
		if (dev->id == id)
			return dev;

	return NULL;
}

static void ti_sci_add_exclusive_dev(struct ti_sci_info *info, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
	if (dev) {
		dev->count++;
		return;
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	dev->id = id;
	dev->count = 1;
	INIT_LIST_HEAD(&dev->list);
	list_add_tail(&dev->list, &info->dev_list);
}

static void ti_sci_delete_exclusive_dev(struct ti_sci_info *info, u32 id)
{
	struct ti_sci_exclusive_dev *dev;

	dev = ti_sci_get_exclusive_dev(&info->dev_list, id);
	if (!dev)
		return;

	if (dev->count > 0)
		dev->count--;
}
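
/*
 * Illustrative sketch (not part of the upstream driver): the exclusive-device
 * bookkeeping above is a simple per-device-ID refcount. Requesting the same
 * ID twice bumps the count; each release decrements it, which is how
 * ti_sci_cmd_release_exclusive_devices() (further below) knows how many put
 * operations are still owed per device. The device ID is hypothetical.
 */
static void __maybe_unused example_exclusive_dev_refcount(struct ti_sci_info *info)
{
	u32 id = 42;	/* hypothetical TISCI device ID */

	ti_sci_add_exclusive_dev(info, id);	/* count = 1 */
	ti_sci_add_exclusive_dev(info, id);	/* count = 2 */
	ti_sci_delete_exclusive_dev(info, id);	/* count = 1 */
}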

/**
 * ti_sci_set_device_state() - Set device state helper
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;
	req.state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	if (state == MSG_DEVICE_SW_STATE_AUTO_OFF)
		ti_sci_delete_exclusive_dev(info, id);
	else if (flags & MSG_FLAG_DEVICE_EXCLUSIVE)
		ti_sci_add_exclusive_dev(info, id);

	return ret;
}

/**
 * ti_sci_set_device_state_no_wait() - Set device state helper without
 *				       requesting or waiting for a response.
 * @handle:	pointer to TI SCI handle
 * @id:		Device identifier
 * @flags:	flags to setup for the device
 * @state:	State to move the device to
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_device_state_no_wait(const struct ti_sci_handle *handle,
					   u32 id, u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_device_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_STATE,
				     flags | TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
				     (u32 *)&req, sizeof(req), 0);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;
	req.state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_get_device_state() - Get device state helper
 * @handle:	Handle to the device
 * @id:		Device Identifier
 * @clcnt:	Pointer to Context Loss Count
 * @resets:	pointer to resets
 * @p_state:	pointer to p_state
 * @c_state:	pointer to c_state
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
				   u32 id, u32 *clcnt, u32 *resets,
				   u8 *p_state, u8 *c_state)
{
	struct ti_sci_msg_resp_get_device_state *resp;
	struct ti_sci_msg_req_get_device_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!clcnt && !resets && !p_state && !c_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_get_device_state *)xfer->tx_message.buf;

	if (clcnt)
		*clcnt = resp->context_loss_count;
	if (resets)
		*resets = resp->resets;
	if (p_state)
		*p_state = resp->programmed_state;
	if (c_state)
		*c_state = resp->current_state;

	return ret;
}

/**
 * ti_sci_cmd_get_device() - command to request for device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * NOTE: The request is for exclusive access for the processor.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_ON);
}

static int ti_sci_cmd_get_device_exclusive(const struct ti_sci_handle *handle,
					   u32 id)
{
	return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_ON);
}

/**
 * ti_sci_cmd_idle_device() - Command to idle a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_idle_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id,
				       0,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

static int ti_sci_cmd_idle_device_exclusive(const struct ti_sci_handle *handle,
					    u32 id)
{
	return ti_sci_set_device_state(handle, id, MSG_FLAG_DEVICE_EXCLUSIVE,
				       MSG_DEVICE_SW_STATE_RETENTION);
}

/**
 * ti_sci_cmd_put_device() - command to release a device managed by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Request for the device - NOTE: the client MUST maintain integrity of
 * usage count by balancing get_device with put_device. No refcounting is
 * managed by driver for that purpose.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_put_device(const struct ti_sci_handle *handle, u32 id)
{
	return ti_sci_set_device_state(handle, id, 0,
				       MSG_DEVICE_SW_STATE_AUTO_OFF);
}
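
/*
 * Illustrative sketch (not part of the upstream driver): the get/idle/put
 * helpers above must be balanced by the caller, since the driver keeps no
 * usage count of its own (apart from the exclusive-device list). The device
 * ID and helper name below are hypothetical.
 */
static int __maybe_unused example_toggle_device(const struct ti_sci_handle *handle)
{
	const u32 dev_id = 146;	/* hypothetical TISCI device ID */
	int ret;

	ret = ti_sci_cmd_get_device(handle, dev_id);	/* power the device on */
	if (ret)
		return ret;

	/* ... use the device ... */

	return ti_sci_cmd_put_device(handle, dev_id);	/* release it again */
}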

static
int ti_sci_cmd_release_exclusive_devices(const struct ti_sci_handle *handle)
{
	struct ti_sci_exclusive_dev *dev, *tmp;
	struct ti_sci_info *info;
	int i, cnt;

	info = handle_to_ti_sci_info(handle);

	list_for_each_entry_safe(dev, tmp, &info->dev_list, list) {
		cnt = dev->count;
		debug("%s: id = %d, cnt = %d\n", __func__, dev->id, cnt);
		for (i = 0; i < cnt; i++)
			ti_sci_cmd_put_device(handle, dev->id);
	}

	return 0;
}

/**
 * ti_sci_cmd_dev_is_valid() - Is the device valid
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 *
 * Return: 0 if all went fine and the device ID is valid, else return
 *	   appropriate error.
 */
static int ti_sci_cmd_dev_is_valid(const struct ti_sci_handle *handle, u32 id)
{
	u8 unused;

	/* check the device state which will also tell us if the ID is valid */
	return ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &unused);
}

/**
 * ti_sci_cmd_dev_get_clcnt() - Get context loss counter
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @count:	Pointer to Context Loss counter to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_get_clcnt(const struct ti_sci_handle *handle, u32 id,
				    u32 *count)
{
	return ti_sci_get_device_state(handle, id, count, NULL, NULL, NULL);
}

/**
 * ti_sci_cmd_dev_is_idle() - Check if the device is requested to be idle
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be idle
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_idle(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state)
{
	int ret;
	u8 state;

	if (!r_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, &state, NULL);
	if (ret)
		return ret;

	*r_state = (state == MSG_DEVICE_SW_STATE_RETENTION);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_stop() - Check if the device is requested to be stopped
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be stopped
 * @curr_state:	true if currently stopped.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_stop(const struct ti_sci_handle *handle, u32 id,
				  bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_AUTO_OFF);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_OFF);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_on() - Check if the device is requested to be ON
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @r_state:	true if requested to be ON
 * @curr_state:	true if currently ON and active
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_on(const struct ti_sci_handle *handle, u32 id,
				bool *r_state, bool *curr_state)
{
	int ret;
	u8 p_state, c_state;

	if (!r_state && !curr_state)
		return -EINVAL;

	ret =
	    ti_sci_get_device_state(handle, id, NULL, NULL, &p_state, &c_state);
	if (ret)
		return ret;

	if (r_state)
		*r_state = (p_state == MSG_DEVICE_SW_STATE_ON);
	if (curr_state)
		*curr_state = (c_state == MSG_DEVICE_HW_STATE_ON);

	return 0;
}

/**
 * ti_sci_cmd_dev_is_trans() - Check if the device is currently transitioning
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @curr_state:	true if currently transitioning.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_dev_is_trans(const struct ti_sci_handle *handle, u32 id,
				   bool *curr_state)
{
	int ret;
	u8 state;

	if (!curr_state)
		return -EINVAL;

	ret = ti_sci_get_device_state(handle, id, NULL, NULL, NULL, &state);
	if (ret)
		return ret;

	*curr_state = (state == MSG_DEVICE_HW_STATE_TRANS);

	return 0;
}

/**
 * ti_sci_cmd_set_device_resets() - command to set resets for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle as retrieved by *ti_sci_get_handle
 * @id:		Device Identifier
 * @reset_state: Device specific reset bit field
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_set_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 reset_state)
{
	struct ti_sci_msg_req_set_device_resets req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_DEVICE_RESETS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.id = id;
	req.resets = reset_state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_cmd_get_device_resets() - Get reset state for device managed
 *				    by TISCI
 * @handle:	Pointer to TISCI handle
 * @id:		Device Identifier
 * @reset_state: Pointer to reset state to populate
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_cmd_get_device_resets(const struct ti_sci_handle *handle,
					u32 id, u32 *reset_state)
{
	return ti_sci_get_device_state(handle, id, NULL, reset_state, NULL,
				       NULL);
}

/**
 * ti_sci_set_clock_state() - Set clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @flags:	Header flags as needed
 * @state:	State to request for the clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_set_clock_state(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id,
				  u32 flags, u8 state)
{
	struct ti_sci_msg_req_set_clock_state req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_STATE,
				     flags | TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.request_state = state;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_cmd_get_clock_state() - Get clock state helper
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @programmed_state:	State requested for clock to move to
 * @current_state:	State that the clock is currently in
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock_state(const struct ti_sci_handle *handle,
				      u32 dev_id, u8 clk_id,
				      u8 *programmed_state, u8 *current_state)
{
	struct ti_sci_msg_resp_get_clock_state *resp;
	struct ti_sci_msg_req_get_clock_state req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	if (!programmed_state && !current_state)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_STATE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_get_clock_state *)xfer->tx_message.buf;

	if (programmed_state)
		*programmed_state = resp->programmed_state;
	if (current_state)
		*current_state = resp->current_state;

	return ret;
}

/**
 * ti_sci_cmd_get_clock() - Get control of a clock from TI SCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @needs_ssc: 'true' if Spread Spectrum clock is desired, else 'false'
 * @can_change_freq: 'true' if frequency change is desired, else 'false'
 * @enable_input_term: 'true' if input termination is desired, else 'false'
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_get_clock(const struct ti_sci_handle *handle, u32 dev_id,
				u8 clk_id, bool needs_ssc, bool can_change_freq,
				bool enable_input_term)
{
	u32 flags = 0;

	flags |= needs_ssc ? MSG_FLAG_CLOCK_ALLOW_SSC : 0;
	flags |= can_change_freq ? MSG_FLAG_CLOCK_ALLOW_FREQ_CHANGE : 0;
	flags |= enable_input_term ? MSG_FLAG_CLOCK_INPUT_TERM : 0;

	return ti_sci_set_clock_state(handle, dev_id, clk_id, flags,
				      MSG_CLOCK_SW_STATE_REQ);
}

/**
 * ti_sci_cmd_idle_clock() - Idle a clock which is in our control
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_idle_clock(const struct ti_sci_handle *handle,
				 u32 dev_id, u8 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_UNREQ);
}

/**
 * ti_sci_cmd_put_clock() - Release a clock from our control back to TISCI
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 *
 * NOTE: This clock must have been requested by get_clock previously.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_put_clock(const struct ti_sci_handle *handle,
				u32 dev_id, u8 clk_id)
{
	return ti_sci_set_clock_state(handle, dev_id, clk_id, 0,
				      MSG_CLOCK_SW_STATE_AUTO);
}
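
/*
 * Illustrative sketch (not part of the upstream driver): a typical clock
 * handshake built from the helpers above - explicitly request a clock
 * (optionally allowing frequency changes), then hand it back to firmware
 * control. The device/clock IDs and helper name are hypothetical.
 */
static int __maybe_unused example_clock_handshake(const struct ti_sci_handle *handle)
{
	const u32 dev_id = 152;	/* hypothetical TISCI device ID */
	const u8 clk_id = 2;	/* hypothetical clock input on that device */
	int ret;

	/* no SSC, allow frequency change, no input termination */
	ret = ti_sci_cmd_get_clock(handle, dev_id, clk_id, false, true, false);
	if (ret)
		return ret;

	/* ... clock is now in MSG_CLOCK_SW_STATE_REQ ... */

	return ti_sci_cmd_put_clock(handle, dev_id, clk_id);
}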

/**
 * ti_sci_cmd_clk_is_auto() - Is the clock being auto managed
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is auto managed
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_auto(const struct ti_sci_handle *handle,
				  u32 dev_id, u8 clk_id, bool *req_state)
{
	u8 state = 0;
	int ret;

	if (!req_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id, &state, NULL);
	if (ret)
		return ret;

	*req_state = (state == MSG_CLOCK_SW_STATE_AUTO);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_on() - Is the clock ON
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and enabled
 * @curr_state: state indicating if the clock is ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_on(const struct ti_sci_handle *handle, u32 dev_id,
				u8 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_REQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_is_off() - Is the clock OFF
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @req_state: state indicating if the clock is managed by us and disabled
 * @curr_state: state indicating if the clock is NOT ready for operation
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_is_off(const struct ti_sci_handle *handle, u32 dev_id,
				 u8 clk_id, bool *req_state, bool *curr_state)
{
	u8 c_state = 0, r_state = 0;
	int ret;

	if (!req_state && !curr_state)
		return -EINVAL;

	ret = ti_sci_cmd_get_clock_state(handle, dev_id, clk_id,
					 &r_state, &c_state);
	if (ret)
		return ret;

	if (req_state)
		*req_state = (r_state == MSG_CLOCK_SW_STATE_UNREQ);
	if (curr_state)
		*curr_state = (c_state == MSG_CLOCK_HW_STATE_NOT_READY);
	return 0;
}

/**
 * ti_sci_cmd_clk_set_parent() - Set the clock source of a specific device clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Parent clock identifier to set
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 clk_id, u8 parent_id)
{
	struct ti_sci_msg_req_set_clock_parent req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_PARENT,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.parent_id = parent_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_parent() - Get current parent clock source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @parent_id:	Current clock parent
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_parent(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 clk_id, u8 *parent_id)
{
	struct ti_sci_msg_resp_get_clock_parent *resp;
	struct ti_sci_msg_req_get_clock_parent req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !parent_id)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_PARENT,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_get_clock_parent *)xfer->tx_message.buf;

	*parent_id = resp->parent_id;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_num_parents() - Get num parents of the current clk source
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @num_parents: Returns the number of parents to the current clock.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_num_parents(const struct ti_sci_handle *handle,
					  u32 dev_id, u8 clk_id,
					  u8 *num_parents)
{
	struct ti_sci_msg_resp_get_clock_num_parents *resp;
	struct ti_sci_msg_req_get_clock_num_parents req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !num_parents)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_NUM_CLOCK_PARENTS,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_get_clock_num_parents *)
	       xfer->tx_message.buf;

	*num_parents = resp->num_parents;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_match_freq() - Find a good match for frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @match_freq:	Frequency match in Hz response.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_match_freq(const struct ti_sci_handle *handle,
					 u32 dev_id, u8 clk_id, u64 min_freq,
					 u64 target_freq, u64 max_freq,
					 u64 *match_freq)
{
	struct ti_sci_msg_resp_query_clock_freq *resp;
	struct ti_sci_msg_req_query_clock_freq req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !match_freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_QUERY_CLOCK_FREQ,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.min_freq_hz = min_freq;
	req.target_freq_hz = target_freq;
	req.max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_query_clock_freq *)xfer->tx_message.buf;

	*match_freq = resp->freq_hz;

	return ret;
}

/**
 * ti_sci_cmd_clk_set_freq() - Set a frequency for clock
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @min_freq:	The minimum allowable frequency in Hz. This is the minimum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 * @target_freq: The target clock frequency in Hz. A frequency will be
 *		processed as close to this target frequency as possible.
 * @max_freq:	The maximum allowable frequency in Hz. This is the maximum
 *		allowable programmed frequency and does not account for clock
 *		tolerances and jitter.
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_set_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u8 clk_id, u64 min_freq,
				   u64 target_freq, u64 max_freq)
{
	struct ti_sci_msg_req_set_clock_freq req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SET_CLOCK_FREQ,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;
	req.min_freq_hz = min_freq;
	req.target_freq_hz = target_freq;
	req.max_freq_hz = max_freq;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_cmd_clk_get_freq() - Get current frequency
 * @handle:	pointer to TI SCI handle
 * @dev_id:	Device identifier this request is for
 * @clk_id:	Clock identifier for the device for this request.
 *		Each device has its own set of clock inputs. This indexes
 *		which clock input to modify.
 * @freq:	Current frequency in Hz
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_clk_get_freq(const struct ti_sci_handle *handle,
				   u32 dev_id, u8 clk_id, u64 *freq)
{
	struct ti_sci_msg_resp_get_clock_freq *resp;
	struct ti_sci_msg_req_get_clock_freq req;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle || !freq)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_CLOCK_FREQ,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.dev_id = dev_id;
	req.clk_id = clk_id;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	resp = (struct ti_sci_msg_resp_get_clock_freq *)xfer->tx_message.buf;

	*freq = resp->freq_hz;

	return ret;
}
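
/*
 * Illustrative sketch (not part of the upstream driver): programming a clock
 * rate through the min/target/max window taken by ti_sci_cmd_clk_set_freq()
 * and reading back what the firmware actually picked. The IDs, rates and
 * helper name are hypothetical.
 */
static int __maybe_unused example_set_clock_rate(const struct ti_sci_handle *handle)
{
	const u32 dev_id = 152;	/* hypothetical TISCI device ID */
	const u8 clk_id = 2;	/* hypothetical clock input */
	u64 freq;
	int ret;

	/* ask for 100 MHz, tolerating anything between 96 MHz and 104 MHz */
	ret = ti_sci_cmd_clk_set_freq(handle, dev_id, clk_id,
				      96000000, 100000000, 104000000);
	if (ret)
		return ret;

	ret = ti_sci_cmd_clk_get_freq(handle, dev_id, clk_id, &freq);
	if (!ret)
		debug("clock now running at %llu Hz\n", freq);

	return ret;
}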

/**
 * ti_sci_cmd_core_reboot() - Command to request system reset
 * @handle:	pointer to TI SCI handle
 *
 * Return: 0 if all went well, else returns appropriate error value.
 */
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
	struct ti_sci_msg_req_reboot req;
	struct ti_sci_msg_hdr *resp;
	struct ti_sci_info *info;
	struct ti_sci_xfer *xfer;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_SYS_RESET,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}
	req.domain = 0;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		return ret;

	return ret;
}

/**
 * ti_sci_get_resource_range - Helper to get a range of resources assigned
 *			       to a host. Resource is uniquely identified by
 *			       type and subtype.
 * @handle:		Pointer to TISCI handle.
 * @dev_id:		TISCI device ID.
 * @subtype:		Resource assignment subtype that is being requested
 *			from the given device.
 * @s_host:		Host processor ID to which the resources are allocated
 * @range_start:	Start index of the resource range
 * @range_num:		Number of resources in the range
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
				     u32 dev_id, u8 subtype, u8 s_host,
				     u16 *range_start, u16 *range_num)
{
	struct ti_sci_msg_resp_get_resource_range *resp;
	struct ti_sci_msg_req_get_resource_range req;
	struct ti_sci_xfer *xfer;
	struct ti_sci_info *info;
	int ret = 0;

	if (IS_ERR(handle))
		return PTR_ERR(handle);
	if (!handle)
		return -EINVAL;

	info = handle_to_ti_sci_info(handle);

	xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_GET_RESOURCE_RANGE,
				     TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
				     (u32 *)&req, sizeof(req), sizeof(*resp));
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		return ret;
	}

	req.secondary_host = s_host;
	req.type = dev_id & MSG_RM_RESOURCE_TYPE_MASK;
	req.subtype = subtype & MSG_RM_RESOURCE_SUBTYPE_MASK;

	ret = ti_sci_do_xfer(info, xfer);
	if (ret)
		goto fail;

	resp = (struct ti_sci_msg_resp_get_resource_range *)xfer->tx_message.buf;
	if (!resp->range_start && !resp->range_num) {
		ret = -ENODEV;
	} else {
		*range_start = resp->range_start;
		*range_num = resp->range_num;
	}

fail:
	return ret;
}
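
/*
 * Illustrative sketch (not part of the upstream driver): querying the range
 * of resources (for example interrupts or rings) that the firmware has
 * assigned to this host, using the helper above with the "invalid" secondary
 * host ID. The device ID, subtype and helper name are hypothetical.
 */
static int __maybe_unused example_query_resource_range(const struct ti_sci_handle *handle)
{
	const u32 dev_id = 187;	/* hypothetical TISCI device ID */
	const u8 subtype = 0xa;	/* hypothetical resource subtype */
	u16 start, num;
	int ret;

	ret = ti_sci_get_resource_range(handle, dev_id, subtype,
					TI_SCI_IRQ_SECONDARY_HOST_INVALID,
					&start, &num);
	if (!ret)
		debug("resource range: start %u, num %u\n", start, num);

	return ret;
}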
1577
Vignesh Raghavendra4214a812021-06-07 19:47:48 +05301578static int __maybe_unused
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05301579ti_sci_cmd_get_resource_range_static(const struct ti_sci_handle *handle,
1580 u32 dev_id, u8 subtype,
1581 u16 *range_start, u16 *range_num)
Vignesh Raghavendra4214a812021-06-07 19:47:48 +05301582{
1583 struct ti_sci_resource_static_data *data;
1584 int i = 0;
1585
1586 while (1) {
1587 data = &rm_static_data[i];
1588
1589 if (!data->dev_id)
1590 return -EINVAL;
1591
1592 if (data->dev_id != dev_id || data->subtype != subtype) {
1593 i++;
1594 continue;
1595 }
1596
1597 *range_start = data->range_start;
1598 *range_num = data->range_num;
1599
1600 return 0;
1601 }
1602
1603 return -EINVAL;
1604}
1605
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05301606/**
1607 * ti_sci_cmd_get_resource_range - Get a range of resources assigned to host
1608 * that is same as ti sci interface host.
1609 * @handle: Pointer to TISCI handle.
1610 * @dev_id: TISCI device ID.
1611 * @subtype: Resource assignment subtype that is being requested
1612 * from the given device.
1613 * @range_start: Start index of the resource range
1614 * @range_num: Number of resources in the range
1615 *
1616 * Return: 0 if all went fine, else return appropriate error.
1617 */
1618static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
1619 u32 dev_id, u8 subtype,
1620 u16 *range_start, u16 *range_num)
1621{
1622 return ti_sci_get_resource_range(handle, dev_id, subtype,
1623 TI_SCI_IRQ_SECONDARY_HOST_INVALID,
1624 range_start, range_num);
1625}
1626
1627/**
1628 * ti_sci_cmd_get_resource_range_from_shost - Get a range of resources
1629 * assigned to a specified host.
1630 * @handle: Pointer to TISCI handle.
1631 * @dev_id: TISCI device ID.
1632 * @subtype: Resource assignment subtype that is being requested
1633 * from the given device.
1634 * @s_host: Host processor ID to which the resources are allocated
1635 * @range_start: Start index of the resource range
1636 * @range_num: Number of resources in the range
1637 *
1638 * Return: 0 if all went fine, else return appropriate error.
1639 */
1640static
1641int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
1642 u32 dev_id, u8 subtype, u8 s_host,
1643 u16 *range_start, u16 *range_num)
1644{
1645 return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
1646 range_start, range_num);
1647}
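
/*
 * Usage sketch (illustrative, not part of the driver): a client holding a
 * TISCI handle can look up the resource range assigned to it through the
 * rm_core_ops installed further below; TISCI_DEV_ID and SUBTYPE are
 * placeholders for SoC-specific values.
 *
 *	const struct ti_sci_handle *h = ti_sci_get_handle(dev);
 *	u16 start, num;
 *
 *	if (!IS_ERR(h) &&
 *	    !h->ops.rm_core_ops.get_range(h, TISCI_DEV_ID, SUBTYPE,
 *					  &start, &num))
 *		dev_dbg(dev, "range: start %u, num %u\n", start, num);
 */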
1648
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301649/**
Lokesh Vutla032dce82019-03-08 11:47:32 +05301650 * ti_sci_cmd_query_msmc() - Command to query currently available msmc memory
1651 * @handle: pointer to TI SCI handle
 * @msmc_start:	MSMC start as returned by tisci
1653 * @msmc_end: MSMC end as returned by tisci
1654 *
1655 * Return: 0 if all went well, else returns appropriate error value.
1656 */
1657static int ti_sci_cmd_query_msmc(const struct ti_sci_handle *handle,
1658 u64 *msmc_start, u64 *msmc_end)
1659{
1660 struct ti_sci_msg_resp_query_msmc *resp;
1661 struct ti_sci_msg_hdr req;
1662 struct ti_sci_info *info;
1663 struct ti_sci_xfer *xfer;
1664 int ret = 0;
1665
1666 if (IS_ERR(handle))
1667 return PTR_ERR(handle);
1668 if (!handle)
1669 return -EINVAL;
1670
1671 info = handle_to_ti_sci_info(handle);
1672
1673 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_QUERY_MSMC,
1674 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1675 (u32 *)&req, sizeof(req), sizeof(*resp));
1676 if (IS_ERR(xfer)) {
1677 ret = PTR_ERR(xfer);
Lokesh Vutla032dce82019-03-08 11:47:32 +05301678 return ret;
1679 }
1680
1681 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001682 if (ret)
Lokesh Vutla032dce82019-03-08 11:47:32 +05301683 return ret;
Lokesh Vutla032dce82019-03-08 11:47:32 +05301684
1685 resp = (struct ti_sci_msg_resp_query_msmc *)xfer->tx_message.buf;
1686
Lokesh Vutla032dce82019-03-08 11:47:32 +05301687 *msmc_start = ((u64)resp->msmc_start_high << TISCI_ADDR_HIGH_SHIFT) |
1688 resp->msmc_start_low;
1689 *msmc_end = ((u64)resp->msmc_end_high << TISCI_ADDR_HIGH_SHIFT) |
1690 resp->msmc_end_low;
1691
1692 return ret;
1693}
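
/*
 * Usage sketch (illustrative, not part of the driver): board code holding a
 * TISCI handle can query the currently available MSMC region through the
 * core_ops installed further below.
 *
 *	u64 msmc_start, msmc_end;
 *
 *	if (!handle->ops.core_ops.query_msmc(handle, &msmc_start, &msmc_end))
 *		printf("MSMC: [0x%llx .. 0x%llx]\n", msmc_start, msmc_end);
 */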
1694
1695/**
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301696 * ti_sci_cmd_proc_request() - Command to request a physical processor control
1697 * @handle: Pointer to TI SCI handle
1698 * @proc_id: Processor ID this request is for
1699 *
1700 * Return: 0 if all went well, else returns appropriate error value.
1701 */
1702static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
1703 u8 proc_id)
1704{
1705 struct ti_sci_msg_req_proc_request req;
1706 struct ti_sci_msg_hdr *resp;
1707 struct ti_sci_info *info;
1708 struct ti_sci_xfer *xfer;
1709 int ret = 0;
1710
1711 if (IS_ERR(handle))
1712 return PTR_ERR(handle);
1713 if (!handle)
1714 return -EINVAL;
1715
1716 info = handle_to_ti_sci_info(handle);
1717
1718 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_REQUEST,
1719 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1720 (u32 *)&req, sizeof(req), sizeof(*resp));
1721 if (IS_ERR(xfer)) {
1722 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301723 return ret;
1724 }
1725 req.processor_id = proc_id;
1726
1727 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001728 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301729 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301730
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301731 return ret;
1732}
1733
1734/**
1735 * ti_sci_cmd_proc_release() - Command to release a physical processor control
1736 * @handle: Pointer to TI SCI handle
1737 * @proc_id: Processor ID this request is for
1738 *
1739 * Return: 0 if all went well, else returns appropriate error value.
1740 */
1741static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
1742 u8 proc_id)
1743{
1744 struct ti_sci_msg_req_proc_release req;
1745 struct ti_sci_msg_hdr *resp;
1746 struct ti_sci_info *info;
1747 struct ti_sci_xfer *xfer;
1748 int ret = 0;
1749
1750 if (IS_ERR(handle))
1751 return PTR_ERR(handle);
1752 if (!handle)
1753 return -EINVAL;
1754
1755 info = handle_to_ti_sci_info(handle);
1756
1757 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_RELEASE,
1758 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1759 (u32 *)&req, sizeof(req), sizeof(*resp));
1760 if (IS_ERR(xfer)) {
1761 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301762 return ret;
1763 }
1764 req.processor_id = proc_id;
1765
1766 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001767 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301768 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301769
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301770 return ret;
1771}
1772
1773/**
1774 * ti_sci_cmd_proc_handover() - Command to handover a physical processor
1775 * control to a host in the processor's access
1776 * control list.
1777 * @handle: Pointer to TI SCI handle
1778 * @proc_id: Processor ID this request is for
1779 * @host_id: Host ID to get the control of the processor
1780 *
1781 * Return: 0 if all went well, else returns appropriate error value.
1782 */
1783static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
1784 u8 proc_id, u8 host_id)
1785{
1786 struct ti_sci_msg_req_proc_handover req;
1787 struct ti_sci_msg_hdr *resp;
1788 struct ti_sci_info *info;
1789 struct ti_sci_xfer *xfer;
1790 int ret = 0;
1791
1792 if (IS_ERR(handle))
1793 return PTR_ERR(handle);
1794 if (!handle)
1795 return -EINVAL;
1796
1797 info = handle_to_ti_sci_info(handle);
1798
1799 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_HANDOVER,
1800 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1801 (u32 *)&req, sizeof(req), sizeof(*resp));
1802 if (IS_ERR(xfer)) {
1803 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301804 return ret;
1805 }
1806 req.processor_id = proc_id;
1807 req.host_id = host_id;
1808
1809 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001810 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301811 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301812
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301813 return ret;
1814}
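
/*
 * Usage sketch (illustrative, not part of the driver): request, handover and
 * release are typically used together when bringing up a remote core;
 * PROC_ID and HOST_ID are placeholders for SoC-specific identifiers.
 *
 *	const struct ti_sci_proc_ops *pops = &handle->ops.proc_ops;
 *	int ret;
 *
 *	ret = pops->proc_request(handle, PROC_ID);
 *	... load and configure the core ...
 *	ret = pops->proc_handover(handle, PROC_ID, HOST_ID);
 *	... or, when this host is done with the core ...
 *	ret = pops->proc_release(handle, PROC_ID);
 */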
1815
1816/**
1817 * ti_sci_cmd_set_proc_boot_cfg() - Command to set the processor boot
1818 * configuration flags
1819 * @handle: Pointer to TI SCI handle
1820 * @proc_id: Processor ID this request is for
 * @bootvector:		Processor Boot vector (start address)
 * @config_flags_set:	Configuration flags to be set
1822 * @config_flags_clear: Configuration flags to be cleared.
1823 *
1824 * Return: 0 if all went well, else returns appropriate error value.
1825 */
1826static int ti_sci_cmd_set_proc_boot_cfg(const struct ti_sci_handle *handle,
1827 u8 proc_id, u64 bootvector,
1828 u32 config_flags_set,
1829 u32 config_flags_clear)
1830{
1831 struct ti_sci_msg_req_set_proc_boot_config req;
1832 struct ti_sci_msg_hdr *resp;
1833 struct ti_sci_info *info;
1834 struct ti_sci_xfer *xfer;
1835 int ret = 0;
1836
1837 if (IS_ERR(handle))
1838 return PTR_ERR(handle);
1839 if (!handle)
1840 return -EINVAL;
1841
1842 info = handle_to_ti_sci_info(handle);
1843
1844 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CONFIG,
1845 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1846 (u32 *)&req, sizeof(req), sizeof(*resp));
1847 if (IS_ERR(xfer)) {
1848 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301849 return ret;
1850 }
1851 req.processor_id = proc_id;
1852 req.bootvector_low = bootvector & TISCI_ADDR_LOW_MASK;
1853 req.bootvector_high = (bootvector & TISCI_ADDR_HIGH_MASK) >>
1854 TISCI_ADDR_HIGH_SHIFT;
1855 req.config_flags_set = config_flags_set;
1856 req.config_flags_clear = config_flags_clear;
1857
1858 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001859 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301860 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301861
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301862 return ret;
1863}
1864
1865/**
1866 * ti_sci_cmd_set_proc_boot_ctrl() - Command to set the processor boot
1867 * control flags
1868 * @handle: Pointer to TI SCI handle
1869 * @proc_id: Processor ID this request is for
1870 * @control_flags_set: Control flags to be set
1871 * @control_flags_clear: Control flags to be cleared
1872 *
1873 * Return: 0 if all went well, else returns appropriate error value.
1874 */
1875static int ti_sci_cmd_set_proc_boot_ctrl(const struct ti_sci_handle *handle,
1876 u8 proc_id, u32 control_flags_set,
1877 u32 control_flags_clear)
1878{
1879 struct ti_sci_msg_req_set_proc_boot_ctrl req;
1880 struct ti_sci_msg_hdr *resp;
1881 struct ti_sci_info *info;
1882 struct ti_sci_xfer *xfer;
1883 int ret = 0;
1884
1885 if (IS_ERR(handle))
1886 return PTR_ERR(handle);
1887 if (!handle)
1888 return -EINVAL;
1889
1890 info = handle_to_ti_sci_info(handle);
1891
1892 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_SET_PROC_BOOT_CTRL,
1893 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1894 (u32 *)&req, sizeof(req), sizeof(*resp));
1895 if (IS_ERR(xfer)) {
1896 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301897 return ret;
1898 }
1899 req.processor_id = proc_id;
1900 req.control_flags_set = control_flags_set;
1901 req.control_flags_clear = control_flags_clear;
1902
1903 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001904 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301905 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301906
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301907 return ret;
1908}
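
/*
 * Usage sketch (illustrative, not part of the driver): programming the boot
 * vector and adjusting boot config/control flags of a requested core; the
 * set/clear masks carry SoC-specific PROC_BOOT_* flag values and PROC_ID is
 * a placeholder.
 *
 *	int ret;
 *
 *	ret = handle->ops.proc_ops.set_proc_boot_cfg(handle, PROC_ID,
 *						     boot_vector,
 *						     cfg_set, cfg_clear);
 *	if (!ret)
 *		ret = handle->ops.proc_ops.set_proc_boot_ctrl(handle, PROC_ID,
 *							      ctrl_set,
 *							      ctrl_clear);
 */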
1909
1910/**
1911 * ti_sci_cmd_proc_auth_boot_image() - Command to authenticate and load the
1912 * image and then set the processor configuration flags.
1913 * @handle: Pointer to TI SCI handle
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001914 * @image_addr: Memory address at which payload image and certificate is
1915 * located in memory, this is updated if the image data is
1916 * moved during authentication.
1917 * @image_size: This is updated with the final size of the image after
1918 * authentication.
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301919 *
1920 * Return: 0 if all went well, else returns appropriate error value.
1921 */
1922static int ti_sci_cmd_proc_auth_boot_image(const struct ti_sci_handle *handle,
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001923 u64 *image_addr, u32 *image_size)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301924{
1925 struct ti_sci_msg_req_proc_auth_boot_image req;
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001926 struct ti_sci_msg_resp_proc_auth_boot_image *resp;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301927 struct ti_sci_info *info;
1928 struct ti_sci_xfer *xfer;
1929 int ret = 0;
1930
1931 if (IS_ERR(handle))
1932 return PTR_ERR(handle);
1933 if (!handle)
1934 return -EINVAL;
1935
1936 info = handle_to_ti_sci_info(handle);
1937
Jorge Ramirez-Ortizb0373282023-01-10 18:29:48 +01001938 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_PROC_AUTH_BOOT_IMAGE,
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301939 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1940 (u32 *)&req, sizeof(req), sizeof(*resp));
1941 if (IS_ERR(xfer)) {
1942 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301943 return ret;
1944 }
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001945 req.cert_addr_low = *image_addr & TISCI_ADDR_LOW_MASK;
1946 req.cert_addr_high = (*image_addr & TISCI_ADDR_HIGH_MASK) >>
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301947 TISCI_ADDR_HIGH_SHIFT;
1948
1949 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001950 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301951 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301952
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001953 resp = (struct ti_sci_msg_resp_proc_auth_boot_image *)xfer->tx_message.buf;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301954
Andrew F. Davis7aa9a082019-04-12 12:54:44 -04001955 *image_addr = (resp->image_addr_low & TISCI_ADDR_LOW_MASK) |
1956 (((u64)resp->image_addr_high <<
1957 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
1958 *image_size = resp->image_size;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301959
1960 return ret;
1961}
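
/*
 * Usage sketch (illustrative, not part of the driver): authenticating a
 * certificate-wrapped image in place; both the address and the size are
 * updated by system firmware on success.
 *
 *	u64 image_addr = (u64)(uintptr_t)image_buf;
 *	u32 image_size = len;
 *	int ret;
 *
 *	ret = handle->ops.proc_ops.proc_auth_boot_image(handle, &image_addr,
 *							&image_size);
 *	if (!ret)
 *		debug("image at 0x%llx, %u bytes after auth\n",
 *		      image_addr, image_size);
 */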
1962
1963/**
1964 * ti_sci_cmd_get_proc_boot_status() - Command to get the processor boot status
1965 * @handle: Pointer to TI SCI handle
 * @proc_id:	Processor ID this request is for
 * @bv:		Processor Boot vector (start address)
 * @cfg_flags:	Processor configuration flags
 * @ctrl_flags:	Processor control flags
 * @sts_flags:	Processor status flags
 *
1968 * Return: 0 if all went well, else returns appropriate error value.
1969 */
1970static int ti_sci_cmd_get_proc_boot_status(const struct ti_sci_handle *handle,
1971 u8 proc_id, u64 *bv, u32 *cfg_flags,
1972 u32 *ctrl_flags, u32 *sts_flags)
1973{
1974 struct ti_sci_msg_resp_get_proc_boot_status *resp;
1975 struct ti_sci_msg_req_get_proc_boot_status req;
1976 struct ti_sci_info *info;
1977 struct ti_sci_xfer *xfer;
1978 int ret = 0;
1979
1980 if (IS_ERR(handle))
1981 return PTR_ERR(handle);
1982 if (!handle)
1983 return -EINVAL;
1984
1985 info = handle_to_ti_sci_info(handle);
1986
1987 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_GET_PROC_BOOT_STATUS,
1988 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
1989 (u32 *)&req, sizeof(req), sizeof(*resp));
1990 if (IS_ERR(xfer)) {
1991 ret = PTR_ERR(xfer);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301992 return ret;
1993 }
1994 req.processor_id = proc_id;
1995
1996 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05001997 if (ret)
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301998 return ret;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05301999
2000 resp = (struct ti_sci_msg_resp_get_proc_boot_status *)
2001 xfer->tx_message.buf;
2002
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302003 *bv = (resp->bootvector_low & TISCI_ADDR_LOW_MASK) |
2004 (((u64)resp->bootvector_high <<
2005 TISCI_ADDR_HIGH_SHIFT) & TISCI_ADDR_HIGH_MASK);
2006 *cfg_flags = resp->config_flags;
2007 *ctrl_flags = resp->control_flags;
2008 *sts_flags = resp->status_flags;
2009
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302010 return ret;
2011}
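
/*
 * Usage sketch (illustrative, not part of the driver): reading back the boot
 * status of a core, e.g. to check whether it has reached WFI.
 *
 *	u64 bv;
 *	u32 cfg, ctrl, sts;
 *	int ret;
 *
 *	ret = handle->ops.proc_ops.get_proc_boot_status(handle, PROC_ID, &bv,
 *							&cfg, &ctrl, &sts);
 *	if (!ret && (sts & PROC_BOOT_STATUS_FLAG_R5_WFI))
 *		debug("core is idle\n");
 */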
2012
2013/**
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302014 * ti_sci_proc_wait_boot_status_no_wait() - Helper function to wait for a
2015 * processor boot status without requesting or
2016 * waiting for a response.
 * @handle:			Pointer to TI SCI handle
 * @proc_id:			Processor ID this request is for
2018 * @num_wait_iterations: Total number of iterations we will check before
2019 * we will timeout and give up
2020 * @num_match_iterations: How many iterations should we have continued
2021 * status to account for status bits glitching.
2022 * This is to make sure that match occurs for
2023 * consecutive checks. This implies that the
2024 * worst case should consider that the stable
2025 * time should at the worst be num_wait_iterations
 *				time should at the worst be num_wait_iterations *
 *				num_match_iterations to prevent timeout.
2028 * between each status checks. This is the minimum
2029 * duration, and overhead of register reads and
2030 * checks are on top of this and can vary based on
2031 * varied conditions.
2032 * @delay_before_iterations_us: Specifies how long to wait (in micro seconds)
2033 * before the very first check in the first
2034 * iteration of status check loop. This is the
2035 * minimum duration, and overhead of register
2036 * reads and checks are.
 *				reads and checks are on top of this.
2038 * status matching this field requested MUST be 1.
2039 * @status_flags_1_set_any_wait:If non-zero, Specifies that at least one of the
2040 * bits matching this field requested MUST be 1.
2041 * @status_flags_1_clr_all_wait:If non-zero, Specifies that all bits of the
2042 * status matching this field requested MUST be 0.
2043 * @status_flags_1_clr_any_wait:If non-zero, Specifies that at least one of the
2044 * bits matching this field requested MUST be 0.
2045 *
2046 * Return: 0 if all goes well, else appropriate error message
2047 */
2048static int
2049ti_sci_proc_wait_boot_status_no_wait(const struct ti_sci_handle *handle,
2050 u8 proc_id,
2051 u8 num_wait_iterations,
2052 u8 num_match_iterations,
2053 u8 delay_per_iteration_us,
2054 u8 delay_before_iterations_us,
2055 u32 status_flags_1_set_all_wait,
2056 u32 status_flags_1_set_any_wait,
2057 u32 status_flags_1_clr_all_wait,
2058 u32 status_flags_1_clr_any_wait)
2059{
2060 struct ti_sci_msg_req_wait_proc_boot_status req;
2061 struct ti_sci_info *info;
2062 struct ti_sci_xfer *xfer;
2063 int ret = 0;
2064
2065 if (IS_ERR(handle))
2066 return PTR_ERR(handle);
2067 if (!handle)
2068 return -EINVAL;
2069
2070 info = handle_to_ti_sci_info(handle);
2071
2072 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_WAIT_PROC_BOOT_STATUS,
2073 TI_SCI_FLAG_REQ_GENERIC_NORESPONSE,
2074 (u32 *)&req, sizeof(req), 0);
2075 if (IS_ERR(xfer)) {
2076 ret = PTR_ERR(xfer);
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302077 return ret;
2078 }
2079 req.processor_id = proc_id;
2080 req.num_wait_iterations = num_wait_iterations;
2081 req.num_match_iterations = num_match_iterations;
2082 req.delay_per_iteration_us = delay_per_iteration_us;
2083 req.delay_before_iterations_us = delay_before_iterations_us;
2084 req.status_flags_1_set_all_wait = status_flags_1_set_all_wait;
2085 req.status_flags_1_set_any_wait = status_flags_1_set_any_wait;
2086 req.status_flags_1_clr_all_wait = status_flags_1_clr_all_wait;
2087 req.status_flags_1_clr_any_wait = status_flags_1_clr_any_wait;
2088
2089 ret = ti_sci_do_xfer(info, xfer);
2090 if (ret)
Andrew Davis771a16f2022-07-25 20:25:03 -05002091 return ret;
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302092
2093 return ret;
2094}
2095
2096/**
2097 * ti_sci_cmd_proc_shutdown_no_wait() - Command to shutdown a core without
2098 * requesting or waiting for a response. Note that this API call
2099 * should be followed by placing the respective processor into
2100 * either WFE or WFI mode.
2101 * @handle: Pointer to TI SCI handle
2102 * @proc_id: Processor ID this request is for
2103 *
2104 * Return: 0 if all went well, else returns appropriate error value.
2105 */
2106static int ti_sci_cmd_proc_shutdown_no_wait(const struct ti_sci_handle *handle,
2107 u8 proc_id)
2108{
2109 int ret;
Sean Anderson405dc242020-09-15 10:44:38 -04002110 struct ti_sci_info *info;
2111
2112 if (IS_ERR(handle))
2113 return PTR_ERR(handle);
2114 if (!handle)
2115 return -EINVAL;
2116
2117 info = handle_to_ti_sci_info(handle);
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302118
2119 /*
2120 * Send the core boot status wait message waiting for either WFE or
2121 * WFI without requesting or waiting for a TISCI response with the
2122 * maximum wait time to give us the best chance to get to the WFE/WFI
2123 * command that should follow the invocation of this API before the
2124 * DMSC-internal processing of this command times out. Note that
2125 * waiting for the R5 WFE/WFI flags will also work on an ARMV8 type
2126 * core as the related flag bit positions are the same.
2127 */
2128 ret = ti_sci_proc_wait_boot_status_no_wait(handle, proc_id,
2129 U8_MAX, 100, U8_MAX, U8_MAX,
2130 0, PROC_BOOT_STATUS_FLAG_R5_WFE | PROC_BOOT_STATUS_FLAG_R5_WFI,
2131 0, 0);
2132 if (ret) {
2133 dev_err(info->dev, "Sending core %u wait message fail %d\n",
2134 proc_id, ret);
2135 return ret;
2136 }
2137
2138 /*
2139 * Release a processor managed by TISCI without requesting or waiting
2140 * for a response.
2141 */
2142 ret = ti_sci_set_device_state_no_wait(handle, proc_id, 0,
2143 MSG_DEVICE_SW_STATE_AUTO_OFF);
2144 if (ret)
2145 dev_err(info->dev, "Sending core %u shutdown message fail %d\n",
2146 proc_id, ret);
2147
2148 return ret;
2149}
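
/*
 * Usage sketch (illustrative, not part of the driver): code running on the
 * core being shut down would issue the call and then drop straight into
 * WFE/WFI, as required by the description above.
 *
 *	int ret;
 *
 *	ret = handle->ops.proc_ops.proc_shutdown_no_wait(handle, PROC_ID);
 *	if (!ret)
 *		asm volatile("wfe");
 */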
2150
2151/**
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302152 * ti_sci_cmd_ring_config() - configure RA ring
2153 * @handle: pointer to TI SCI handle
2154 * @valid_params: Bitfield defining validity of ring configuration parameters.
2155 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
2156 * @index: Ring index.
2157 * @addr_lo: The ring base address lo 32 bits
2158 * @addr_hi: The ring base address hi 32 bits
2159 * @count: Number of ring elements.
2160 * @mode: The mode of the ring
2161 * @size: The ring element size.
2162 * @order_id: Specifies the ring's bus order ID.
2163 *
2164 * Return: 0 if all went well, else returns appropriate error value.
2165 *
2166 * See @ti_sci_msg_rm_ring_cfg_req for more info.
2167 */
2168static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2169 u32 valid_params, u16 nav_id, u16 index,
2170 u32 addr_lo, u32 addr_hi, u32 count,
2171 u8 mode, u8 size, u8 order_id)
2172{
2173 struct ti_sci_msg_rm_ring_cfg_resp *resp;
2174 struct ti_sci_msg_rm_ring_cfg_req req;
2175 struct ti_sci_xfer *xfer;
2176 struct ti_sci_info *info;
2177 int ret = 0;
2178
2179 if (IS_ERR(handle))
2180 return PTR_ERR(handle);
2181 if (!handle)
2182 return -EINVAL;
2183
2184 info = handle_to_ti_sci_info(handle);
2185
2186 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2187 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2188 (u32 *)&req, sizeof(req), sizeof(*resp));
2189 if (IS_ERR(xfer)) {
2190 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302191 return ret;
2192 }
2193 req.valid_params = valid_params;
2194 req.nav_id = nav_id;
2195 req.index = index;
2196 req.addr_lo = addr_lo;
2197 req.addr_hi = addr_hi;
2198 req.count = count;
2199 req.mode = mode;
2200 req.size = size;
2201 req.order_id = order_id;
2202
2203 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002204 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302205 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302206
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302207fail:
2208 dev_dbg(info->dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2209 return ret;
2210}
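
/*
 * Usage sketch (illustrative, not part of the driver): configuring a ring
 * that was allocated to this host; valid_params selects which of the other
 * arguments the firmware should apply (see struct ti_sci_msg_rm_ring_cfg_req)
 * and the remaining values come from the client's own ring setup.
 *
 *	int ret;
 *
 *	ret = handle->ops.rm_ring_ops.config(handle, valid_params, nav_id,
 *					     ring_idx,
 *					     lower_32_bits(ring_dma),
 *					     upper_32_bits(ring_dma),
 *					     ring_elements, mode, elsize, 0);
 */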
2211
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302212static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2213 u32 nav_id, u32 src_thread, u32 dst_thread)
2214{
2215 struct ti_sci_msg_hdr *resp;
2216 struct ti_sci_msg_psil_pair req;
2217 struct ti_sci_xfer *xfer;
2218 struct ti_sci_info *info;
2219 int ret = 0;
2220
2221 if (IS_ERR(handle))
2222 return PTR_ERR(handle);
2223 if (!handle)
2224 return -EINVAL;
2225
2226 info = handle_to_ti_sci_info(handle);
2227
2228 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2229 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2230 (u32 *)&req, sizeof(req), sizeof(*resp));
2231 if (IS_ERR(xfer)) {
2232 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302233 return ret;
2234 }
2235 req.nav_id = nav_id;
2236 req.src_thread = src_thread;
2237 req.dst_thread = dst_thread;
2238
2239 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002240 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302241 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302242
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302243fail:
	dev_dbg(info->dev, "RM_PSIL: nav: %u link pair %u->%u ret:%d\n",
2245 nav_id, src_thread, dst_thread, ret);
2246 return ret;
2247}
2248
2249static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2250 u32 nav_id, u32 src_thread, u32 dst_thread)
2251{
2252 struct ti_sci_msg_hdr *resp;
2253 struct ti_sci_msg_psil_unpair req;
2254 struct ti_sci_xfer *xfer;
2255 struct ti_sci_info *info;
2256 int ret = 0;
2257
2258 if (IS_ERR(handle))
2259 return PTR_ERR(handle);
2260 if (!handle)
2261 return -EINVAL;
2262
2263 info = handle_to_ti_sci_info(handle);
2264
2265 xfer = ti_sci_setup_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2266 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2267 (u32 *)&req, sizeof(req), sizeof(*resp));
2268 if (IS_ERR(xfer)) {
2269 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302270 return ret;
2271 }
2272 req.nav_id = nav_id;
2273 req.src_thread = src_thread;
2274 req.dst_thread = dst_thread;
2275
2276 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002277 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302278 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302279
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302280fail:
	dev_dbg(info->dev, "RM_PSIL: link unpair %u->%u ret:%d\n",
2282 src_thread, dst_thread, ret);
2283 return ret;
2284}
2285
2286static int ti_sci_cmd_rm_udmap_tx_ch_cfg(
2287 const struct ti_sci_handle *handle,
2288 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2289{
2290 struct ti_sci_msg_rm_udmap_tx_ch_cfg_resp *resp;
2291 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req req;
2292 struct ti_sci_xfer *xfer;
2293 struct ti_sci_info *info;
2294 int ret = 0;
2295
2296 if (IS_ERR(handle))
2297 return PTR_ERR(handle);
2298 if (!handle)
2299 return -EINVAL;
2300
2301 info = handle_to_ti_sci_info(handle);
2302
2303 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2304 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2305 (u32 *)&req, sizeof(req), sizeof(*resp));
2306 if (IS_ERR(xfer)) {
2307 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302308 return ret;
2309 }
2310 req.valid_params = params->valid_params;
2311 req.nav_id = params->nav_id;
2312 req.index = params->index;
2313 req.tx_pause_on_err = params->tx_pause_on_err;
2314 req.tx_filt_einfo = params->tx_filt_einfo;
2315 req.tx_filt_pswords = params->tx_filt_pswords;
2316 req.tx_atype = params->tx_atype;
2317 req.tx_chan_type = params->tx_chan_type;
2318 req.tx_supr_tdpkt = params->tx_supr_tdpkt;
2319 req.tx_fetch_size = params->tx_fetch_size;
2320 req.tx_credit_count = params->tx_credit_count;
2321 req.txcq_qnum = params->txcq_qnum;
2322 req.tx_priority = params->tx_priority;
2323 req.tx_qos = params->tx_qos;
2324 req.tx_orderid = params->tx_orderid;
2325 req.fdepth = params->fdepth;
2326 req.tx_sched_priority = params->tx_sched_priority;
Vignesh Raghavendraa8a2b8a2021-05-10 20:06:02 +05302327 req.tx_burst_size = params->tx_burst_size;
2328 req.tx_tdtype = params->tx_tdtype;
2329 req.extended_ch_type = params->extended_ch_type;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302330
2331 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002332 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302333 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302334
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302335fail:
	dev_dbg(info->dev, "TX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2337 return ret;
2338}
2339
2340static int ti_sci_cmd_rm_udmap_rx_ch_cfg(
2341 const struct ti_sci_handle *handle,
2342 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2343{
2344 struct ti_sci_msg_rm_udmap_rx_ch_cfg_resp *resp;
2345 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req req;
2346 struct ti_sci_xfer *xfer;
2347 struct ti_sci_info *info;
2348 int ret = 0;
2349
2350 if (IS_ERR(handle))
2351 return PTR_ERR(handle);
2352 if (!handle)
2353 return -EINVAL;
2354
2355 info = handle_to_ti_sci_info(handle);
2356
2357 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2358 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2359 (u32 *)&req, sizeof(req), sizeof(*resp));
2360 if (IS_ERR(xfer)) {
2361 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302362 return ret;
2363 }
2364
2365 req.valid_params = params->valid_params;
2366 req.nav_id = params->nav_id;
2367 req.index = params->index;
2368 req.rx_fetch_size = params->rx_fetch_size;
2369 req.rxcq_qnum = params->rxcq_qnum;
2370 req.rx_priority = params->rx_priority;
2371 req.rx_qos = params->rx_qos;
2372 req.rx_orderid = params->rx_orderid;
2373 req.rx_sched_priority = params->rx_sched_priority;
2374 req.flowid_start = params->flowid_start;
2375 req.flowid_cnt = params->flowid_cnt;
2376 req.rx_pause_on_err = params->rx_pause_on_err;
2377 req.rx_atype = params->rx_atype;
2378 req.rx_chan_type = params->rx_chan_type;
2379 req.rx_ignore_short = params->rx_ignore_short;
2380 req.rx_ignore_long = params->rx_ignore_long;
2381
2382 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002383 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302384 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302385
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302386fail:
2387 dev_dbg(info->dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2388 return ret;
2389}
2390
2391static int ti_sci_cmd_rm_udmap_rx_flow_cfg(
2392 const struct ti_sci_handle *handle,
2393 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2394{
2395 struct ti_sci_msg_rm_udmap_flow_cfg_resp *resp;
2396 struct ti_sci_msg_rm_udmap_flow_cfg_req req;
2397 struct ti_sci_xfer *xfer;
2398 struct ti_sci_info *info;
2399 int ret = 0;
2400
2401 if (IS_ERR(handle))
2402 return PTR_ERR(handle);
2403 if (!handle)
2404 return -EINVAL;
2405
2406 info = handle_to_ti_sci_info(handle);
2407
2408 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2409 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2410 (u32 *)&req, sizeof(req), sizeof(*resp));
2411 if (IS_ERR(xfer)) {
2412 ret = PTR_ERR(xfer);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302413 return ret;
2414 }
2415
2416 req.valid_params = params->valid_params;
2417 req.nav_id = params->nav_id;
2418 req.flow_index = params->flow_index;
2419 req.rx_einfo_present = params->rx_einfo_present;
2420 req.rx_psinfo_present = params->rx_psinfo_present;
2421 req.rx_error_handling = params->rx_error_handling;
2422 req.rx_desc_type = params->rx_desc_type;
2423 req.rx_sop_offset = params->rx_sop_offset;
2424 req.rx_dest_qnum = params->rx_dest_qnum;
2425 req.rx_src_tag_hi = params->rx_src_tag_hi;
2426 req.rx_src_tag_lo = params->rx_src_tag_lo;
2427 req.rx_dest_tag_hi = params->rx_dest_tag_hi;
2428 req.rx_dest_tag_lo = params->rx_dest_tag_lo;
2429 req.rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2430 req.rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2431 req.rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2432 req.rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2433 req.rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2434 req.rx_fdq1_qnum = params->rx_fdq1_qnum;
2435 req.rx_fdq2_qnum = params->rx_fdq2_qnum;
2436 req.rx_fdq3_qnum = params->rx_fdq3_qnum;
2437 req.rx_ps_location = params->rx_ps_location;
2438
2439 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002440 if (ret)
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302441 goto fail;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302442
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302443fail:
2444 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302445 return ret;
2446}
2447
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002448/**
2449 * ti_sci_cmd_set_fwl_region() - Request for configuring a firewall region
2450 * @handle: pointer to TI SCI handle
2451 * @region: region configuration parameters
2452 *
2453 * Return: 0 if all went well, else returns appropriate error value.
2454 */
2455static int ti_sci_cmd_set_fwl_region(const struct ti_sci_handle *handle,
2456 const struct ti_sci_msg_fwl_region *region)
2457{
2458 struct ti_sci_msg_fwl_set_firewall_region_req req;
2459 struct ti_sci_msg_hdr *resp;
2460 struct ti_sci_info *info;
2461 struct ti_sci_xfer *xfer;
2462 int ret = 0;
2463
2464 if (IS_ERR(handle))
2465 return PTR_ERR(handle);
2466 if (!handle)
2467 return -EINVAL;
2468
2469 info = handle_to_ti_sci_info(handle);
2470
2471 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_SET,
2472 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2473 (u32 *)&req, sizeof(req), sizeof(*resp));
2474 if (IS_ERR(xfer)) {
2475 ret = PTR_ERR(xfer);
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002476 return ret;
2477 }
2478
2479 req.fwl_id = region->fwl_id;
2480 req.region = region->region;
2481 req.n_permission_regs = region->n_permission_regs;
2482 req.control = region->control;
2483 req.permissions[0] = region->permissions[0];
2484 req.permissions[1] = region->permissions[1];
2485 req.permissions[2] = region->permissions[2];
2486 req.start_address = region->start_address;
2487 req.end_address = region->end_address;
2488
2489 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002490 if (ret)
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002491 return ret;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002492
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002493 return 0;
2494}
2495
2496/**
2497 * ti_sci_cmd_get_fwl_region() - Request for getting a firewall region
2498 * @handle: pointer to TI SCI handle
2499 * @region: region configuration parameters
2500 *
2501 * Return: 0 if all went well, else returns appropriate error value.
2502 */
2503static int ti_sci_cmd_get_fwl_region(const struct ti_sci_handle *handle,
2504 struct ti_sci_msg_fwl_region *region)
2505{
2506 struct ti_sci_msg_fwl_get_firewall_region_req req;
2507 struct ti_sci_msg_fwl_get_firewall_region_resp *resp;
2508 struct ti_sci_info *info;
2509 struct ti_sci_xfer *xfer;
2510 int ret = 0;
2511
2512 if (IS_ERR(handle))
2513 return PTR_ERR(handle);
2514 if (!handle)
2515 return -EINVAL;
2516
2517 info = handle_to_ti_sci_info(handle);
2518
2519 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_GET,
2520 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2521 (u32 *)&req, sizeof(req), sizeof(*resp));
2522 if (IS_ERR(xfer)) {
2523 ret = PTR_ERR(xfer);
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002524 return ret;
2525 }
2526
2527 req.fwl_id = region->fwl_id;
2528 req.region = region->region;
2529 req.n_permission_regs = region->n_permission_regs;
2530
2531 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002532 if (ret)
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002533 return ret;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002534
2535 resp = (struct ti_sci_msg_fwl_get_firewall_region_resp *)xfer->tx_message.buf;
2536
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002537 region->fwl_id = resp->fwl_id;
2538 region->region = resp->region;
2539 region->n_permission_regs = resp->n_permission_regs;
2540 region->control = resp->control;
2541 region->permissions[0] = resp->permissions[0];
2542 region->permissions[1] = resp->permissions[1];
2543 region->permissions[2] = resp->permissions[2];
2544 region->start_address = resp->start_address;
2545 region->end_address = resp->end_address;
2546
2547 return 0;
2548}
2549
2550/**
2551 * ti_sci_cmd_change_fwl_owner() - Request for changing a firewall owner
2552 * @handle: pointer to TI SCI handle
 * @owner:	firewall region ownership parameters
2554 *
2555 * Return: 0 if all went well, else returns appropriate error value.
2556 */
2557static int ti_sci_cmd_change_fwl_owner(const struct ti_sci_handle *handle,
2558 struct ti_sci_msg_fwl_owner *owner)
2559{
2560 struct ti_sci_msg_fwl_change_owner_info_req req;
2561 struct ti_sci_msg_fwl_change_owner_info_resp *resp;
2562 struct ti_sci_info *info;
2563 struct ti_sci_xfer *xfer;
2564 int ret = 0;
2565
2566 if (IS_ERR(handle))
2567 return PTR_ERR(handle);
2568 if (!handle)
2569 return -EINVAL;
2570
2571 info = handle_to_ti_sci_info(handle);
2572
Andrew F. Davis8928fbd2019-04-29 09:04:11 -04002573 xfer = ti_sci_setup_one_xfer(info, TISCI_MSG_FWL_CHANGE_OWNER,
2574 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002575 (u32 *)&req, sizeof(req), sizeof(*resp));
2576 if (IS_ERR(xfer)) {
2577 ret = PTR_ERR(xfer);
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002578 return ret;
2579 }
2580
2581 req.fwl_id = owner->fwl_id;
2582 req.region = owner->region;
2583 req.owner_index = owner->owner_index;
2584
2585 ret = ti_sci_do_xfer(info, xfer);
Andrew Davis771a16f2022-07-25 20:25:03 -05002586 if (ret)
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002587 return ret;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002588
2589 resp = (struct ti_sci_msg_fwl_change_owner_info_resp *)xfer->tx_message.buf;
2590
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002591 owner->fwl_id = resp->fwl_id;
2592 owner->region = resp->region;
2593 owner->owner_index = resp->owner_index;
2594 owner->owner_privid = resp->owner_privid;
2595 owner->owner_permission_bits = resp->owner_permission_bits;
2596
2597 return ret;
2598}
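
/*
 * Usage sketch (illustrative, not part of the driver): reading a firewall
 * region back, widening its permissions and writing it out again; fwl_id,
 * region index and the permission value are SoC/board specific.
 *
 *	struct ti_sci_msg_fwl_region region = {
 *		.fwl_id = fwl_id,
 *		.region = region_idx,
 *		.n_permission_regs = 3,
 *	};
 *	int ret;
 *
 *	ret = handle->ops.fwl_ops.get_fwl_region(handle, &region);
 *	if (!ret) {
 *		region.permissions[0] |= new_perm;
 *		ret = handle->ops.fwl_ops.set_fwl_region(handle, &region);
 *	}
 */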
2599
/**
 * ti_sci_setup_ops() - Setup the operations structures
 * @info:	pointer to TISCI info structure
2603 */
2604static void ti_sci_setup_ops(struct ti_sci_info *info)
2605{
2606 struct ti_sci_ops *ops = &info->handle.ops;
2607 struct ti_sci_board_ops *bops = &ops->board_ops;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302608 struct ti_sci_dev_ops *dops = &ops->dev_ops;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05302609 struct ti_sci_clk_ops *cops = &ops->clk_ops;
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05302610 struct ti_sci_core_ops *core_ops = &ops->core_ops;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302611 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302612 struct ti_sci_proc_ops *pops = &ops->proc_ops;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302613 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2614 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2615 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002616 struct ti_sci_fwl_ops *fwl_ops = &ops->fwl_ops;
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302617
2618 bops->board_config = ti_sci_cmd_set_board_config;
2619 bops->board_config_rm = ti_sci_cmd_set_board_config_rm;
2620 bops->board_config_security = ti_sci_cmd_set_board_config_security;
2621 bops->board_config_pm = ti_sci_cmd_set_board_config_pm;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302622
2623 dops->get_device = ti_sci_cmd_get_device;
Lokesh Vutlaf5613002019-06-07 19:24:39 +05302624 dops->get_device_exclusive = ti_sci_cmd_get_device_exclusive;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302625 dops->idle_device = ti_sci_cmd_idle_device;
Lokesh Vutlaf5613002019-06-07 19:24:39 +05302626 dops->idle_device_exclusive = ti_sci_cmd_idle_device_exclusive;
Andreas Dannenberg24a4d5e2018-08-27 15:57:34 +05302627 dops->put_device = ti_sci_cmd_put_device;
2628 dops->is_valid = ti_sci_cmd_dev_is_valid;
2629 dops->get_context_loss_count = ti_sci_cmd_dev_get_clcnt;
2630 dops->is_idle = ti_sci_cmd_dev_is_idle;
2631 dops->is_stop = ti_sci_cmd_dev_is_stop;
2632 dops->is_on = ti_sci_cmd_dev_is_on;
2633 dops->is_transitioning = ti_sci_cmd_dev_is_trans;
2634 dops->set_device_resets = ti_sci_cmd_set_device_resets;
2635 dops->get_device_resets = ti_sci_cmd_get_device_resets;
Lokesh Vutla0d0412a2019-06-07 19:24:41 +05302636 dops->release_exclusive_devices = ti_sci_cmd_release_exclusive_devices;
Lokesh Vutlad10c80c2018-08-27 15:57:35 +05302637
2638 cops->get_clock = ti_sci_cmd_get_clock;
2639 cops->idle_clock = ti_sci_cmd_idle_clock;
2640 cops->put_clock = ti_sci_cmd_put_clock;
2641 cops->is_auto = ti_sci_cmd_clk_is_auto;
2642 cops->is_on = ti_sci_cmd_clk_is_on;
2643 cops->is_off = ti_sci_cmd_clk_is_off;
2644
2645 cops->set_parent = ti_sci_cmd_clk_set_parent;
2646 cops->get_parent = ti_sci_cmd_clk_get_parent;
2647 cops->get_num_parents = ti_sci_cmd_clk_get_num_parents;
2648
2649 cops->get_best_match_freq = ti_sci_cmd_clk_get_match_freq;
2650 cops->set_freq = ti_sci_cmd_clk_set_freq;
2651 cops->get_freq = ti_sci_cmd_clk_get_freq;
Andreas Dannenberg5bd08372018-08-27 15:57:36 +05302652
2653 core_ops->reboot_device = ti_sci_cmd_core_reboot;
Lokesh Vutla032dce82019-03-08 11:47:32 +05302654 core_ops->query_msmc = ti_sci_cmd_query_msmc;
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302655
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302656 rm_core_ops->get_range = ti_sci_cmd_get_resource_range;
2657 rm_core_ops->get_range_from_shost =
2658 ti_sci_cmd_get_resource_range_from_shost;
2659
Lokesh Vutlab8856af2018-08-27 15:57:37 +05302660 pops->proc_request = ti_sci_cmd_proc_request;
2661 pops->proc_release = ti_sci_cmd_proc_release;
2662 pops->proc_handover = ti_sci_cmd_proc_handover;
2663 pops->set_proc_boot_cfg = ti_sci_cmd_set_proc_boot_cfg;
2664 pops->set_proc_boot_ctrl = ti_sci_cmd_set_proc_boot_ctrl;
2665 pops->proc_auth_boot_image = ti_sci_cmd_proc_auth_boot_image;
2666 pops->get_proc_boot_status = ti_sci_cmd_get_proc_boot_status;
Andreas Dannenbergca08cb32019-06-07 19:24:40 +05302667 pops->proc_shutdown_no_wait = ti_sci_cmd_proc_shutdown_no_wait;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302668
2669 rops->config = ti_sci_cmd_ring_config;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302670
2671 psilops->pair = ti_sci_cmd_rm_psil_pair;
2672 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2673
2674 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2675 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2676 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
Andrew F. Davis2aafc0c2019-04-12 12:54:43 -04002677
2678 fwl_ops->set_fwl_region = ti_sci_cmd_set_fwl_region;
2679 fwl_ops->get_fwl_region = ti_sci_cmd_get_fwl_region;
2680 fwl_ops->change_fwl_owner = ti_sci_cmd_change_fwl_owner;
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302681}
2682
2683/**
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302684 * ti_sci_get_handle_from_sysfw() - Get the TI SCI handle of the SYSFW
 * @sci_dev:	Pointer to the SYSFW device
2686 *
2687 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2688 * are encountered.
2689 */
2690const
2691struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *sci_dev)
2692{
Neha Malcom Francisc7dedd02023-09-27 18:39:53 +05302693 int ret;
2694
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302695 if (!sci_dev)
2696 return ERR_PTR(-EINVAL);
2697
2698 struct ti_sci_info *info = dev_get_priv(sci_dev);
2699
2700 if (!info)
2701 return ERR_PTR(-EINVAL);
2702
2703 struct ti_sci_handle *handle = &info->handle;
2704
2705 if (!handle)
2706 return ERR_PTR(-EINVAL);
2707
Neha Malcom Francisc7dedd02023-09-27 18:39:53 +05302708 ret = ti_sci_cmd_get_revision(handle);
2709
2710 if (ret)
2711 return ERR_PTR(-EINVAL);
2712
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302713 return handle;
2714}
2715
2716/**
2717 * ti_sci_get_handle() - Get the TI SCI handle for a device
2718 * @dev: Pointer to device for which we want SCI handle
2719 *
2720 * Return: pointer to handle if successful, else EINVAL if invalid conditions
2721 * are encountered.
2722 */
2723const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev)
2724{
2725 if (!dev)
2726 return ERR_PTR(-EINVAL);
2727
2728 struct udevice *sci_dev = dev_get_parent(dev);
2729
2730 return ti_sci_get_handle_from_sysfw(sci_dev);
2731}
2732
2733/**
2734 * ti_sci_get_by_phandle() - Get the TI SCI handle using DT phandle
2735 * @dev: device node
 * @property:	property name containing phandle on TISCI node
2737 *
2738 * Return: pointer to handle if successful, else appropriate error value.
2739 */
2740const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev,
2741 const char *property)
2742{
2743 struct ti_sci_info *entry, *info = NULL;
	u32 phandle;
	int err;
2745 ofnode node;
2746
2747 err = ofnode_read_u32(dev_ofnode(dev), property, &phandle);
2748 if (err)
2749 return ERR_PTR(err);
2750
2751 node = ofnode_get_by_phandle(phandle);
2752 if (!ofnode_valid(node))
2753 return ERR_PTR(-EINVAL);
2754
2755 list_for_each_entry(entry, &ti_sci_list, list)
2756 if (ofnode_equal(dev_ofnode(entry->dev), node)) {
2757 info = entry;
2758 break;
2759 }
2760
2761 if (!info)
2762 return ERR_PTR(-ENODEV);
2763
2764 return &info->handle;
2765}
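
/*
 * Usage sketch (illustrative, not part of the driver): a client device that
 * sits under the TISCI node in the device tree can simply use its parent,
 * while any other device can point at the controller through a phandle
 * property ("ti,sci" below is only an example property name).
 *
 *	const struct ti_sci_handle *h;
 *
 *	h = ti_sci_get_handle(dev);
 *	... or ...
 *	h = ti_sci_get_by_phandle(dev, "ti,sci");
 *	if (IS_ERR(h))
 *		return PTR_ERR(h);
 */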
2766
2767/**
2768 * ti_sci_of_to_info() - generate private data from device tree
2769 * @dev: corresponding system controller interface device
2770 * @info: pointer to driver specific private data
2771 *
2772 * Return: 0 if all goes good, else appropriate error message.
2773 */
2774static int ti_sci_of_to_info(struct udevice *dev, struct ti_sci_info *info)
2775{
2776 int ret;
2777
2778 ret = mbox_get_by_name(dev, "tx", &info->chan_tx);
2779 if (ret) {
2780 dev_err(dev, "%s: Acquiring Tx channel failed. ret = %d\n",
2781 __func__, ret);
2782 return ret;
2783 }
2784
2785 ret = mbox_get_by_name(dev, "rx", &info->chan_rx);
2786 if (ret) {
2787 dev_err(dev, "%s: Acquiring Rx channel failed. ret = %d\n",
2788 __func__, ret);
2789 return ret;
2790 }
2791
2792 /* Notify channel is optional. Enable only if populated */
2793 ret = mbox_get_by_name(dev, "notify", &info->chan_notify);
2794 if (ret) {
2795 dev_dbg(dev, "%s: Acquiring notify channel failed. ret = %d\n",
2796 __func__, ret);
2797 }
2798
2799 info->host_id = dev_read_u32_default(dev, "ti,host-id",
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302800 info->desc->default_host_id);
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302801
2802 info->is_secure = dev_read_bool(dev, "ti,secure-host");
2803
2804 return 0;
2805}
2806
2807/**
2808 * ti_sci_probe() - Basic probe
2809 * @dev: corresponding system controller interface device
2810 *
2811 * Return: 0 if all goes good, else appropriate error message.
2812 */
2813static int ti_sci_probe(struct udevice *dev)
2814{
2815 struct ti_sci_info *info;
2816 int ret;
2817
2818 debug("%s(dev=%p)\n", __func__, dev);
2819
2820 info = dev_get_priv(dev);
2821 info->desc = (void *)dev_get_driver_data(dev);
2822
2823 ret = ti_sci_of_to_info(dev, info);
2824 if (ret) {
2825 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2826 return ret;
2827 }
2828
2829 info->dev = dev;
2830 info->seq = 0xA;
2831
2832 list_add_tail(&info->list, &ti_sci_list);
Andreas Dannenberg5299c4c2018-08-27 15:57:33 +05302833 ti_sci_setup_ops(info);
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302834
Lokesh Vutla0d0412a2019-06-07 19:24:41 +05302835 INIT_LIST_HEAD(&info->dev_list);
2836
Neha Malcom Francisc7dedd02023-09-27 18:39:53 +05302837 return 0;
Lokesh Vutla5af02db2018-08-27 15:57:32 +05302838}
2839
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05302840/**
2841 * ti_sci_dm_probe() - Basic probe for DM to TIFS SCI
2842 * @dev: corresponding system controller interface device
2843 *
2844 * Return: 0 if all goes good, else appropriate error message.
2845 */
2846static __maybe_unused int ti_sci_dm_probe(struct udevice *dev)
2847{
2848 struct ti_sci_rm_core_ops *rm_core_ops;
2849 struct ti_sci_rm_udmap_ops *udmap_ops;
2850 struct ti_sci_rm_ringacc_ops *rops;
2851 struct ti_sci_rm_psil_ops *psilops;
2852 struct ti_sci_ops *ops;
2853 struct ti_sci_info *info;
2854 int ret;
2855
2856 debug("%s(dev=%p)\n", __func__, dev);
2857
2858 info = dev_get_priv(dev);
2859 info->desc = (void *)dev_get_driver_data(dev);
2860
2861 ret = ti_sci_of_to_info(dev, info);
2862 if (ret) {
2863 dev_err(dev, "%s: Probe failed with error %d\n", __func__, ret);
2864 return ret;
2865 }
2866
2867 info->dev = dev;
2868 info->seq = 0xA;
2869
2870 list_add_tail(&info->list, &ti_sci_list);
2871
2872 ops = &info->handle.ops;
2873
2874 rm_core_ops = &ops->rm_core_ops;
2875 rm_core_ops->get_range = ti_sci_cmd_get_resource_range_static;
2876
2877 rops = &ops->rm_ring_ops;
2878 rops->config = ti_sci_cmd_ring_config;
2879
2880 psilops = &ops->rm_psil_ops;
2881 psilops->pair = ti_sci_cmd_rm_psil_pair;
2882 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2883
2884 udmap_ops = &ops->rm_udmap_ops;
2885 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2886 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2887 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2888
2889 return ret;
2890}
2891
/**
2893 * ti_sci_get_free_resource() - Get a free resource from TISCI resource.
2894 * @res: Pointer to the TISCI resource
2895 *
2896 * Return: resource num if all went ok else TI_SCI_RESOURCE_NULL.
2897 */
2898u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
2899{
2900 u16 set, free_bit;
2901
2902 for (set = 0; set < res->sets; set++) {
2903 free_bit = find_first_zero_bit(res->desc[set].res_map,
2904 res->desc[set].num);
2905 if (free_bit != res->desc[set].num) {
2906 set_bit(free_bit, res->desc[set].res_map);
2907 return res->desc[set].start + free_bit;
2908 }
2909 }
2910
2911 return TI_SCI_RESOURCE_NULL;
2912}
2913
2914/**
2915 * ti_sci_release_resource() - Release a resource from TISCI resource.
 * @res:	Pointer to the TISCI resource
 * @id:		Resource id to be released.
2917 */
2918void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
2919{
2920 u16 set;
2921
2922 for (set = 0; set < res->sets; set++) {
2923 if (res->desc[set].start <= id &&
2924 (res->desc[set].num + res->desc[set].start) > id)
2925 clear_bit(id - res->desc[set].start,
2926 res->desc[set].res_map);
2927 }
2928}
2929
2930/**
2931 * devm_ti_sci_get_of_resource() - Get a TISCI resource assigned to a device
2932 * @handle: TISCI handle
 * @dev:	Device pointer to which the resource is assigned
 * @dev_id:	TISCI device ID of the device for which the resources are
 *		requested
 * @of_prop:	property name by which the resources are represented
2935 *
 * Note: This function expects of_prop to be a list of resource subtypes
 * for @dev_id. Allocates and initializes ti_sci_resource structure
2938 * for each of_prop. Client driver can directly call
2939 * ti_sci_(get_free, release)_resource apis for handling the resource.
2940 *
2941 * Return: Pointer to ti_sci_resource if all went well else appropriate
2942 * error pointer.
2943 */
2944struct ti_sci_resource *
2945devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2946 struct udevice *dev, u32 dev_id, char *of_prop)
2947{
2948 u32 resource_subtype;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302949 struct ti_sci_resource *res;
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05002950 bool valid_set = false;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302951 int sets, i, ret;
2952 u32 *temp;
2953
2954 res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
2955 if (!res)
2956 return ERR_PTR(-ENOMEM);
2957
2958 sets = dev_read_size(dev, of_prop);
2959 if (sets < 0) {
2960 dev_err(dev, "%s resource type ids not available\n", of_prop);
2961 return ERR_PTR(sets);
2962 }
	temp = malloc(sets);
	if (!temp)
		return ERR_PTR(-ENOMEM);
2964 sets /= sizeof(u32);
2965 res->sets = sets;
2966
2967 res->desc = devm_kcalloc(dev, res->sets, sizeof(*res->desc),
2968 GFP_KERNEL);
2969 if (!res->desc)
2970 return ERR_PTR(-ENOMEM);
2971
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302972 ret = dev_read_u32_array(dev, of_prop, temp, res->sets);
2973 if (ret)
2974 return ERR_PTR(-EINVAL);
2975
2976 for (i = 0; i < res->sets; i++) {
2977 resource_subtype = temp[i];
2978 ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
2979 resource_subtype,
2980 &res->desc[i].start,
2981 &res->desc[i].num);
2982 if (ret) {
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05002983 dev_dbg(dev, "type %d subtype %d not allocated for host %d\n",
Lokesh Vutla0acf1dc2020-08-17 11:00:48 +05302984 dev_id, resource_subtype,
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302985 handle_to_ti_sci_info(handle)->host_id);
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05002986 res->desc[i].start = 0;
2987 res->desc[i].num = 0;
2988 continue;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302989 }
2990
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05002991 valid_set = true;
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302992 dev_dbg(dev, "res type = %d, subtype = %d, start = %d, num = %d\n",
Lokesh Vutla0acf1dc2020-08-17 11:00:48 +05302993 dev_id, resource_subtype, res->desc[i].start,
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05302994 res->desc[i].num);
2995
2996 res->desc[i].res_map =
2997 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
2998 sizeof(*res->desc[i].res_map), GFP_KERNEL);
2999 if (!res->desc[i].res_map)
3000 return ERR_PTR(-ENOMEM);
3001 }
3002
Vignesh Raghavendrae1164dd2019-08-05 12:26:44 -05003003 if (valid_set)
3004 return res;
3005
3006 return ERR_PTR(-EINVAL);
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303007}
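
/*
 * Usage sketch (illustrative, not part of the driver): a DMA client would
 * typically parse its resource ranges once at probe time and then allocate
 * and release individual resources from them; the property name below is
 * only an example.
 *
 *	struct ti_sci_resource *res;
 *	u16 ring;
 *
 *	res = devm_ti_sci_get_of_resource(handle, dev, dev_id,
 *					  "ti,sci-rm-range-gp-rings");
 *	if (IS_ERR(res))
 *		return PTR_ERR(res);
 *
 *	ring = ti_sci_get_free_resource(res);
 *	if (ring == TI_SCI_RESOURCE_NULL)
 *		return -ENOENT;
 *	...
 *	ti_sci_release_resource(res, ring);
 */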
3008
3009/* Description for K2G */
3010static const struct ti_sci_desc ti_sci_pmmc_k2g_desc = {
3011 .default_host_id = 2,
3012 /* Conservative duration */
3013 .max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3015 .max_msgs = 20,
3016 .max_msg_size = 64,
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303017};
3018
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303019/* Description for AM654 */
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303020static const struct ti_sci_desc ti_sci_pmmc_am654_desc = {
3021 .default_host_id = 12,
3022 /* Conservative duration */
3023 .max_rx_timeout_ms = 10000,
	/* Limited by MBOX_TX_QUEUE_LEN. K2G can handle up to 128 messages! */
3025 .max_msgs = 20,
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303026 .max_msg_size = 60,
3027};
3028
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05303029/* Description for J721e DM to DMSC communication */
3030static const struct ti_sci_desc ti_sci_dm_j721e_desc = {
3031 .default_host_id = 3,
3032 .max_rx_timeout_ms = 10000,
3033 .max_msgs = 20,
3034 .max_msg_size = 60,
3035};
3036
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303037static const struct udevice_id ti_sci_ids[] = {
3038 {
3039 .compatible = "ti,k2g-sci",
Grygorii Strashkod64c5b22019-02-05 17:31:21 +05303040 .data = (ulong)&ti_sci_pmmc_k2g_desc
3041 },
3042 {
3043 .compatible = "ti,am654-sci",
3044 .data = (ulong)&ti_sci_pmmc_am654_desc
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303045 },
3046 { /* Sentinel */ },
3047};
3048
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05303049static __maybe_unused const struct udevice_id ti_sci_dm_ids[] = {
3050 {
3051 .compatible = "ti,j721e-dm-sci",
3052 .data = (ulong)&ti_sci_dm_j721e_desc
3053 },
3054 { /* Sentinel */ },
3055};
3056
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303057U_BOOT_DRIVER(ti_sci) = {
3058 .name = "ti_sci",
3059 .id = UCLASS_FIRMWARE,
3060 .of_match = ti_sci_ids,
3061 .probe = ti_sci_probe,
Simon Glass8a2b47f2020-12-03 16:55:17 -07003062 .priv_auto = sizeof(struct ti_sci_info),
Lokesh Vutla5af02db2018-08-27 15:57:32 +05303063};
Vignesh Raghavendraaa0e3fc2021-06-07 19:47:49 +05303064
3065#if IS_ENABLED(CONFIG_K3_DM_FW)
3066U_BOOT_DRIVER(ti_sci_dm) = {
3067 .name = "ti_sci_dm",
3068 .id = UCLASS_FIRMWARE,
3069 .of_match = ti_sci_dm_ids,
3070 .probe = ti_sci_dm_probe,
3071 .priv_auto = sizeof(struct ti_sci_info),
3072};
3073#endif