blob: 0b61f87ebeae3f6b8ea15333149ed1908507d732 [file] [log] [blame]
developere5e687d2023-08-08 16:05:33 +08001// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: Alvin Kuo <alvin.kuog@mediatek.com>
6 * Ren-Ting Wang <ren-ting.wang@mediatek.com>
7 */
8
9#include <linux/debugfs.h>
10#include <linux/err.h>
11#include <linux/io.h>
12#include <linux/mutex.h>
13#include <linux/of.h>
14#include <linux/of_address.h>
15#include <linux/platform_device.h>
16#include <linux/printk.h>
17#include <linux/relay.h>
18#include <linux/types.h>
19
developer0fb30d52023-12-04 09:51:36 +080020#include "tops/internal.h"
21#include "tops/mbox.h"
22#include "tops/mcu.h"
23#include "tops/netsys.h"
24#include "tops/trm-fs.h"
25#include "tops/trm-mcu.h"
26#include "tops/trm.h"
developere5e687d2023-08-08 16:05:33 +080027
/* size of the metadata header prepended to every relay fragment */
#define TRM_HDR_LEN				(sizeof(struct trm_header))

/* payload bytes left in one relay sub-buffer after the header */
#define RLY_DUMP_SUBBUF_DATA_MAX		(RLY_DUMP_SUBBUF_SZ - TRM_HDR_LEN)

/* mailbox send channels used to query the management and offload cores */
struct tops_runtime_monitor {
	struct mailbox_dev mgmt_send_mdev;
	struct mailbox_dev offload_send_mdev[CORE_OFFLOAD_NUM];
};
36
/* identity and context of one monitored-region dump, embedded in each header */
struct trm_info {
	char name[TRM_CONFIG_NAME_MAX_LEN];	/* region name, NUL-terminated */
	u64 dump_time;				/* wall-clock seconds at dump time */
	u32 start_addr;				/* absolute start of dumped window */
	u32 size;				/* total bytes (may include core dump frame) */
	u32 rsn; /* TRM_RSN_* */
};

/* per-fragment header written ahead of every relay sub-buffer payload */
struct trm_header {
	struct trm_info info;
	u32 data_offset;	/* byte offset of this fragment within the dump */
	u32 data_len;		/* payload length of this fragment */
	u8 last_frag;		/* non-zero on the final fragment */
};
51
/* device handle shared with other TRM components */
struct device *trm_dev;

/* mailbox send endpoints towards the management core and each offload core */
static struct tops_runtime_monitor trm = {
	.mgmt_send_mdev = MBOX_SEND_MGMT_DEV(TRM),
	.offload_send_mdev = {
		[CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, TRM),
		[CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, TRM),
		[CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, TRM),
		[CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, TRM),
	},
};
/* registered hardware back-ends, one slot per enum trm_hardware */
static struct trm_hw_config *trm_hw_configs[__TRM_HARDWARE_MAX];
/* serializes region (re)configuration against dump operations */
struct mutex trm_lock;
65
66static inline void trm_hdr_init(struct trm_header *trm_hdr,
67 struct trm_config *trm_cfg,
68 u32 size,
69 u64 dump_time,
70 u32 dump_rsn)
71{
72 if (unlikely(!trm_hdr || !trm_cfg))
73 return;
74
75 memset(trm_hdr, 0, TRM_HDR_LEN);
76
77 strncpy(trm_hdr->info.name, trm_cfg->name, TRM_CONFIG_NAME_MAX_LEN);
developer0fb30d52023-12-04 09:51:36 +080078 trm_hdr->info.name[TRM_CONFIG_NAME_MAX_LEN - 1] = '\0';
developere5e687d2023-08-08 16:05:33 +080079 trm_hdr->info.start_addr = trm_cfg->addr + trm_cfg->offset;
80 trm_hdr->info.size = size;
81 trm_hdr->info.dump_time = dump_time;
82 trm_hdr->info.rsn = dump_rsn;
83}
84
85static inline int trm_cfg_sanity_check(struct trm_config *trm_cfg)
86{
87 u32 start = trm_cfg->addr + trm_cfg->offset;
88 u32 end = start + trm_cfg->size;
89
90 if (start < trm_cfg->addr || end > trm_cfg->addr + trm_cfg->len)
91 return -1;
92
93 return 0;
94}
95
96static inline bool trm_cfg_is_core_dump_en(struct trm_config *trm_cfg)
97{
98 return trm_cfg->flag & TRM_CONFIG_F_CORE_DUMP;
99}
100
101static inline bool trm_cfg_is_en(struct trm_config *trm_cfg)
102{
103 return trm_cfg->flag & TRM_CONFIG_F_ENABLE;
104}
105
106static inline int __mtk_trm_cfg_setup(struct trm_config *trm_cfg,
107 u32 offset, u32 size, u8 enable)
108{
109 struct trm_config tmp = { 0 };
110
111 if (!enable) {
112 trm_cfg->flag &= ~TRM_CONFIG_F_ENABLE;
113 } else {
114 tmp.addr = trm_cfg->addr;
115 tmp.len = trm_cfg->len;
116 tmp.offset = offset;
117 tmp.size = size;
118
119 if (trm_cfg_sanity_check(&tmp))
120 return -EINVAL;
121
122 trm_cfg->offset = offset;
123 trm_cfg->size = size;
124 trm_cfg->flag |= TRM_CONFIG_F_ENABLE;
125 }
126
127 return 0;
128}
129
/*
 * Reconfigure (or disable) dumping for every region whose name matches
 * @name across all registered hardware back-ends.
 *
 * NOTE(review): strncmp() bounded by strlen(name) is a *prefix* match —
 * e.g. "core" would match both "core0" and "core1" — and every matching
 * region is reconfigured; confirm this is intentional.
 *
 * Returns the result of the last matching setup attempt, or 0 when no
 * region matched at all (ret keeps its initial value).
 */
int mtk_trm_cfg_setup(char *name, u32 offset, u32 size, u8 enable)
{
	struct trm_hw_config *trm_hw_cfg;
	struct trm_config *trm_cfg;
	int ret = 0;
	u32 i, j;

	for (i = 0; i < __TRM_HARDWARE_MAX; i++) {
		trm_hw_cfg = trm_hw_configs[i];
		/* skip empty slots or back-ends without region tables */
		if (unlikely(!trm_hw_cfg || !trm_hw_cfg->trm_cfgs))
			continue;

		for (j = 0; j < trm_hw_cfg->cfg_len; j++) {
			trm_cfg = &trm_hw_cfg->trm_cfgs[j];

			if (!strncmp(trm_cfg->name, name, strlen(name))) {
				/* serialize against a concurrent dump */
				mutex_lock(&trm_lock);

				ret = __mtk_trm_cfg_setup(trm_cfg,
							  offset,
							  size,
							  enable);

				mutex_unlock(&trm_lock);
			}
		}
	}

	return ret;
}
160
/* append core dump(via ocd) in bottom of core-x-dtcm file */
static inline void __mtk_trm_save_core_dump(struct trm_config *trm_cfg,
					    void *dst,
					    u32 *frag_len)
{
	/*
	 * Carve CORE_DUMP_FRAME_LEN bytes off the tail of the final fragment
	 * and copy the captured core-dump frame for this core there; the
	 * shrunken *frag_len is what the TRM HW then fills with memory data.
	 * NOTE(review): assumes *frag_len >= CORE_DUMP_FRAME_LEN — true when
	 * the core-dump frame is not split across fragments; the caller's
	 * TODO acknowledges the split case is unhandled.
	 */
	*frag_len -= CORE_DUMP_FRAME_LEN;
	memcpy(dst + *frag_len, &cd_frams[trm_cfg->core], CORE_DUMP_FRAME_LEN);
}
169
/*
 * Stream one TRM region to the relay channel as a sequence of
 * (struct trm_header + payload) fragments, each at most one sub-buffer.
 *
 * @trm_hw_cfg: back-end supplying the trm_hw_dump() copy routine.
 * @trm_cfg:    region to dump; should have passed trm_cfg_sanity_check().
 * @dump_time:  wall-clock seconds recorded in each fragment header.
 * @dump_rsn:   TRM_RSN_* reason recorded in each fragment header.
 *
 * Returns 0 on success or the PTR_ERR() of a failed relay reservation;
 * fragments flushed before a failure remain in the relay buffer.
 */
static int __mtk_trm_dump(struct trm_hw_config *trm_hw_cfg,
			  struct trm_config *trm_cfg,
			  u64 dump_time,
			  u32 dump_rsn)
{
	struct trm_header trm_hdr;
	u32 total = trm_cfg->size;	/* bytes still to emit */
	u32 i = 0;			/* fragment index */
	u32 frag_len;
	u32 ofs;
	void *dst;

	/* reserve core dump frame len if core dump enabled */
	if (trm_cfg_is_core_dump_en(trm_cfg))
		total += CORE_DUMP_FRAME_LEN;

	/* fill in trm information */
	trm_hdr_init(&trm_hdr, trm_cfg, total, dump_time, dump_rsn);

	while (total > 0) {
		/* full-sized fragment, or the final partial one */
		if (total >= RLY_DUMP_SUBBUF_DATA_MAX) {
			frag_len = RLY_DUMP_SUBBUF_DATA_MAX;
			total -= RLY_DUMP_SUBBUF_DATA_MAX;
		} else {
			frag_len = total;
			total = 0;
			trm_hdr.last_frag = true;
		}

		trm_hdr.data_offset = i++ * RLY_DUMP_SUBBUF_DATA_MAX;
		trm_hdr.data_len = frag_len;

		/* grab one sub-buffer for header + payload */
		dst = mtk_trm_fs_relay_reserve(frag_len + TRM_HDR_LEN);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		memcpy(dst, &trm_hdr, TRM_HDR_LEN);
		dst += TRM_HDR_LEN;

		/* TODO: what if core dump is being cut between 2 fragment? */
		if (trm_hdr.last_frag && trm_cfg_is_core_dump_en(trm_cfg))
			__mtk_trm_save_core_dump(trm_cfg, dst, &frag_len);

		/* source address for this fragment within the region */
		ofs = trm_hdr.info.start_addr + trm_hdr.data_offset;

		/* let TRM HW write memory to destination */
		trm_hw_cfg->trm_hw_dump(dst, ofs, frag_len);

		mtk_trm_fs_relay_flush();
	}

	return 0;
}
223
224static void trm_cpu_utilization_ret_handler(void *priv,
225 struct mailbox_msg *msg)
226{
227 u32 *cpu_utilization = priv;
228
229 /*
230 * msg1: ticks of idle task
231 * msg2: ticks of this statistic period
232 */
233 if (msg->msg2 != 0)
234 *cpu_utilization = (msg->msg2 - msg->msg1) * 100U / msg->msg2;
235}
236
237int mtk_trm_cpu_utilization(enum core_id core, u32 *cpu_utilization)
238{
239 struct mailbox_dev *send_mdev;
240 struct mailbox_msg msg;
241 int ret;
242
243 if (core > CORE_MGMT || !cpu_utilization)
244 return -EINVAL;
245
246 if (!mtk_tops_mcu_alive()) {
247 TRM_ERR("mcu not alive\n");
248 return -EAGAIN;
249 }
250
251 memset(&msg, 0, sizeof(struct mailbox_msg));
252 msg.msg1 = TRM_CMD_TYPE_CPU_UTILIZATION;
253
254 *cpu_utilization = 0;
255
256 if (core == CORE_MGMT)
257 send_mdev = &trm.mgmt_send_mdev;
258 else
259 send_mdev = &trm.offload_send_mdev[core];
260
261 ret = mbox_send_msg(send_mdev,
262 &msg,
263 cpu_utilization,
264 trm_cpu_utilization_ret_handler);
265 if (ret) {
266 TRM_ERR("send CPU_UTILIZATION cmd failed(%d)\n", ret);
267 return ret;
268 }
269
developere5e687d2023-08-08 16:05:33 +0800270 return 0;
271}
272
/*
 * Dump every enabled TRM region of every registered hardware back-end
 * to the relay file system.
 *
 * @rsn: dump reason code (TRM_RSN_*) recorded in each fragment header.
 *
 * Returns 0 on success or a negative errno; aborts on the first region
 * that fails its sanity check or its dump. Holds trm_lock for the whole
 * walk so configuration cannot change mid-dump.
 */
int mtk_trm_dump(u32 rsn)
{
	/* wall-clock time in whole seconds, stored in the dump headers */
	u64 time = ktime_to_ns(ktime_get_real()) / 1000000000;
	struct trm_hw_config *trm_hw_cfg;
	struct trm_config *trm_cfg;
	int ret = 0;
	u32 i, j;

	/* the relay channel must exist before anything can be written */
	if (!mtk_trm_fs_is_init())
		return -EINVAL;

	mutex_lock(&trm_lock);

	/* snapshot core state (via OCD) before reading memories */
	mtk_trm_mcu_core_dump();

	for (i = 0; i < __TRM_HARDWARE_MAX; i++) {
		trm_hw_cfg = trm_hw_configs[i];
		if (unlikely(!trm_hw_cfg || !trm_hw_cfg->trm_hw_dump))
			continue;

		for (j = 0; j < trm_hw_cfg->cfg_len; j++) {
			trm_cfg = &trm_hw_cfg->trm_cfgs[j];
			if (unlikely(!trm_cfg || !trm_cfg_is_en(trm_cfg)))
				continue;

			if (unlikely(trm_cfg_sanity_check(trm_cfg))) {
				TRM_ERR("trm %s: sanity check fail\n", trm_cfg->name);
				ret = -EINVAL;
				goto out;
			}

			ret = __mtk_trm_dump(trm_hw_cfg, trm_cfg, time, rsn);
			if (ret) {
				TRM_ERR("trm %s: trm dump fail: %d\n",
					trm_cfg->name, ret);
				goto out;
			}
		}
	}

	TRM_NOTICE("TOPS runtime monitor dump\n");

out:
	mutex_unlock(&trm_lock);

	return ret;
}
320
developerfbdb5112023-08-21 15:12:14 +0800321static int mtk_tops_trm_register_mbox(void)
322{
323 int ret;
324 int i;
325
326 ret = register_mbox_dev(MBOX_SEND, &trm.mgmt_send_mdev);
327 if (ret) {
328 TRM_ERR("register trm mgmt mbox send failed: %d\n", ret);
329 return ret;
330 }
331
332 for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
333 ret = register_mbox_dev(MBOX_SEND, &trm.offload_send_mdev[i]);
334 if (ret) {
335 TRM_ERR("register trm offload %d mbox send failed: %d\n",
336 i, ret);
337 goto err_unregister_offload_mbox;
338 }
339 }
340
341 return ret;
342
343err_unregister_offload_mbox:
344 for (i -= 1; i >= 0; i--)
345 unregister_mbox_dev(MBOX_SEND, &trm.offload_send_mdev[i]);
346
347 unregister_mbox_dev(MBOX_SEND, &trm.mgmt_send_mdev);
348
349 return ret;
350}
351
352static void mtk_tops_trm_unregister_mbox(void)
353{
354 int i;
355
356 unregister_mbox_dev(MBOX_SEND, &trm.mgmt_send_mdev);
357
358 for (i = 0; i < CORE_OFFLOAD_NUM; i++)
359 unregister_mbox_dev(MBOX_SEND, &trm.offload_send_mdev[i]);
360}
361
developere5e687d2023-08-08 16:05:33 +0800362int __init mtk_tops_trm_init(void)
363{
developerfbdb5112023-08-21 15:12:14 +0800364 int ret;
365
developere5e687d2023-08-08 16:05:33 +0800366 mutex_init(&trm_lock);
367
developerfbdb5112023-08-21 15:12:14 +0800368 ret = mtk_tops_trm_register_mbox();
369 if (ret)
370 return ret;
371
developere5e687d2023-08-08 16:05:33 +0800372 return mtk_tops_trm_mcu_init();
373}
374
/* module teardown: drop the mailbox channels, then the TRM MCU support */
void __exit mtk_tops_trm_exit(void)
{
	mtk_tops_trm_unregister_mbox();

	mtk_tops_trm_mcu_exit();
}
381
382int mtk_trm_hw_config_register(enum trm_hardware trm_hw,
383 struct trm_hw_config *trm_hw_cfg)
384{
385 if (unlikely(trm_hw >= __TRM_HARDWARE_MAX || !trm_hw_cfg))
386 return -ENODEV;
387
388 if (unlikely(!trm_hw_cfg->cfg_len || !trm_hw_cfg->trm_hw_dump))
389 return -EINVAL;
390
391 if (trm_hw_configs[trm_hw])
392 return -EBUSY;
393
394 trm_hw_configs[trm_hw] = trm_hw_cfg;
395
396 return 0;
397}
398
/*
 * Remove a hardware back-end from its TRM slot. No-op when the slot
 * index is invalid or the slot holds a different config than the one
 * being unregistered (so a stale caller cannot evict a live back-end).
 */
void mtk_trm_hw_config_unregister(enum trm_hardware trm_hw,
				  struct trm_hw_config *trm_hw_cfg)
{
	if (unlikely(trm_hw >= __TRM_HARDWARE_MAX || !trm_hw_cfg))
		return;

	/* only the owner of the slot may clear it */
	if (trm_hw_configs[trm_hw] != trm_hw_cfg)
		return;

	trm_hw_configs[trm_hw] = NULL;
409}