blob: a3f959f9acc37c1c49835be4713d23137a84c368 [file] [log] [blame]
developere5e687d2023-08-08 16:05:33 +08001// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: Alvin Kuo <alvin.kuog@mediatek.com>
6 * Ren-Ting Wang <ren-ting.wang@mediatek.com>
7 */
8
9#include <linux/debugfs.h>
10#include <linux/err.h>
11#include <linux/io.h>
12#include <linux/mutex.h>
13#include <linux/of.h>
14#include <linux/of_address.h>
15#include <linux/platform_device.h>
16#include <linux/printk.h>
17#include <linux/relay.h>
18#include <linux/types.h>
19
developerfbdb5112023-08-21 15:12:14 +080020#include "internal.h"
developere5e687d2023-08-08 16:05:33 +080021#include "mbox.h"
22#include "mcu.h"
23#include "netsys.h"
24#include "trm-fs.h"
25#include "trm-mcu.h"
26#include "trm.h"
27
28#define TRM_HDR_LEN (sizeof(struct trm_header))
29
30#define RLY_DUMP_SUBBUF_DATA_MAX (RLY_DUMP_SUBBUF_SZ - TRM_HDR_LEN)
31
developerfbdb5112023-08-21 15:12:14 +080032struct tops_runtime_monitor {
33 struct mailbox_dev mgmt_send_mdev;
34 struct mailbox_dev offload_send_mdev[CORE_OFFLOAD_NUM];
35};
36
developere5e687d2023-08-08 16:05:33 +080037struct trm_info {
38 char name[TRM_CONFIG_NAME_MAX_LEN];
39 u64 dump_time;
40 u32 start_addr;
41 u32 size;
42 u32 rsn; /* TRM_RSN_* */
43};
44
/* Per-fragment header prepended to every relay sub-buffer of a dump. */
struct trm_header {
	struct trm_info info;	/* region metadata, repeated in each fragment */
	u32 data_offset;	/* byte offset of this fragment within the dump */
	u32 data_len;		/* payload bytes following this header */
	u8 last_frag;		/* non-zero on the final fragment */
};
51
struct device *trm_dev;

/* mailbox send devices towards the management core and each offload core */
static struct tops_runtime_monitor trm = {
	.mgmt_send_mdev = MBOX_SEND_MGMT_DEV(TRM),
	.offload_send_mdev = {
		[CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, TRM),
		[CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, TRM),
		[CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, TRM),
		[CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, TRM),
	},
};
/* registered TRM hardware backends, one slot per enum trm_hardware */
static struct trm_hw_config *trm_hw_configs[__TRM_HARDWARE_MAX];
/* serializes trm_config updates and dump operations */
struct mutex trm_lock;
65
66static inline void trm_hdr_init(struct trm_header *trm_hdr,
67 struct trm_config *trm_cfg,
68 u32 size,
69 u64 dump_time,
70 u32 dump_rsn)
71{
72 if (unlikely(!trm_hdr || !trm_cfg))
73 return;
74
75 memset(trm_hdr, 0, TRM_HDR_LEN);
76
77 strncpy(trm_hdr->info.name, trm_cfg->name, TRM_CONFIG_NAME_MAX_LEN);
78 trm_hdr->info.start_addr = trm_cfg->addr + trm_cfg->offset;
79 trm_hdr->info.size = size;
80 trm_hdr->info.dump_time = dump_time;
81 trm_hdr->info.rsn = dump_rsn;
82}
83
84static inline int trm_cfg_sanity_check(struct trm_config *trm_cfg)
85{
86 u32 start = trm_cfg->addr + trm_cfg->offset;
87 u32 end = start + trm_cfg->size;
88
89 if (start < trm_cfg->addr || end > trm_cfg->addr + trm_cfg->len)
90 return -1;
91
92 return 0;
93}
94
95static inline bool trm_cfg_is_core_dump_en(struct trm_config *trm_cfg)
96{
97 return trm_cfg->flag & TRM_CONFIG_F_CORE_DUMP;
98}
99
100static inline bool trm_cfg_is_en(struct trm_config *trm_cfg)
101{
102 return trm_cfg->flag & TRM_CONFIG_F_ENABLE;
103}
104
105static inline int __mtk_trm_cfg_setup(struct trm_config *trm_cfg,
106 u32 offset, u32 size, u8 enable)
107{
108 struct trm_config tmp = { 0 };
109
110 if (!enable) {
111 trm_cfg->flag &= ~TRM_CONFIG_F_ENABLE;
112 } else {
113 tmp.addr = trm_cfg->addr;
114 tmp.len = trm_cfg->len;
115 tmp.offset = offset;
116 tmp.size = size;
117
118 if (trm_cfg_sanity_check(&tmp))
119 return -EINVAL;
120
121 trm_cfg->offset = offset;
122 trm_cfg->size = size;
123 trm_cfg->flag |= TRM_CONFIG_F_ENABLE;
124 }
125
126 return 0;
127}
128
129int mtk_trm_cfg_setup(char *name, u32 offset, u32 size, u8 enable)
130{
131 struct trm_hw_config *trm_hw_cfg;
132 struct trm_config *trm_cfg;
133 int ret = 0;
134 u32 i, j;
135
136 for (i = 0; i < __TRM_HARDWARE_MAX; i++) {
137 trm_hw_cfg = trm_hw_configs[i];
developer0b3c7712023-08-24 16:23:03 +0800138 if (unlikely(!trm_hw_cfg || !trm_hw_cfg->trm_cfgs))
developere5e687d2023-08-08 16:05:33 +0800139 continue;
140
141 for (j = 0; j < trm_hw_cfg->cfg_len; j++) {
142 trm_cfg = &trm_hw_cfg->trm_cfgs[j];
developere5e687d2023-08-08 16:05:33 +0800143
144 if (!strncmp(trm_cfg->name, name, strlen(name))) {
145 mutex_lock(&trm_lock);
146
147 ret = __mtk_trm_cfg_setup(trm_cfg,
148 offset,
149 size,
150 enable);
151
152 mutex_unlock(&trm_lock);
153 }
154 }
155 }
156
157 return ret;
158}
159
160/* append core dump(via ocd) in bottom of core-x-dtcm file */
161static inline void __mtk_trm_save_core_dump(struct trm_config *trm_cfg,
162 void *dst,
163 u32 *frag_len)
164{
165 *frag_len -= CORE_DUMP_FRAME_LEN;
166 memcpy(dst + *frag_len, &cd_frams[trm_cfg->core], CORE_DUMP_FRAME_LEN);
167}
168
/*
 * Stream one configured memory region into the relay file in fragments.
 * Each fragment is a trm_header followed by at most
 * RLY_DUMP_SUBBUF_DATA_MAX bytes of data written by the TRM hardware.
 * Returns 0 on success, or the PTR_ERR() of a failed relay reservation.
 */
static int __mtk_trm_dump(struct trm_hw_config *trm_hw_cfg,
			  struct trm_config *trm_cfg,
			  u64 dump_time,
			  u32 dump_rsn)
{
	struct trm_header trm_hdr;
	u32 total = trm_cfg->size;
	u32 i = 0;
	u32 frag_len;
	u32 ofs;
	void *dst;

	/* reserve core dump frame len if core dump enabled */
	if (trm_cfg_is_core_dump_en(trm_cfg))
		total += CORE_DUMP_FRAME_LEN;

	/* fill in trm information */
	trm_hdr_init(&trm_hdr, trm_cfg, total, dump_time, dump_rsn);

	while (total > 0) {
		/* split remaining data into sub-buffer sized fragments */
		if (total >= RLY_DUMP_SUBBUF_DATA_MAX) {
			frag_len = RLY_DUMP_SUBBUF_DATA_MAX;
			total -= RLY_DUMP_SUBBUF_DATA_MAX;
		} else {
			frag_len = total;
			total = 0;
			trm_hdr.last_frag = true;
		}

		trm_hdr.data_offset = i++ * RLY_DUMP_SUBBUF_DATA_MAX;
		trm_hdr.data_len = frag_len;

		dst = mtk_trm_fs_relay_reserve(frag_len + TRM_HDR_LEN);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		memcpy(dst, &trm_hdr, TRM_HDR_LEN);
		dst += TRM_HDR_LEN;

		/* TODO: what if core dump is being cut between 2 fragment? */
		if (trm_hdr.last_frag && trm_cfg_is_core_dump_en(trm_cfg))
			__mtk_trm_save_core_dump(trm_cfg, dst, &frag_len);

		ofs = trm_hdr.info.start_addr + trm_hdr.data_offset;

		/* let TRM HW write memory to destination */
		trm_hw_cfg->trm_hw_dump(dst, ofs, frag_len);

		mtk_trm_fs_relay_flush();
	}

	return 0;
}
222
223static void trm_cpu_utilization_ret_handler(void *priv,
224 struct mailbox_msg *msg)
225{
226 u32 *cpu_utilization = priv;
227
228 /*
229 * msg1: ticks of idle task
230 * msg2: ticks of this statistic period
231 */
232 if (msg->msg2 != 0)
233 *cpu_utilization = (msg->msg2 - msg->msg1) * 100U / msg->msg2;
234}
235
/*
 * Query the CPU utilization (in percent) of one TOPS core over mailbox.
 * @core: CORE_OFFLOAD_* or CORE_MGMT
 * @cpu_utilization: filled in by trm_cpu_utilization_ret_handler();
 *                   stays 0 when the core reports a zero-length period
 * Returns 0 on success, -EINVAL on bad arguments, -EAGAIN when the MCU
 * is not alive, or the mbox_send_msg() error code.
 */
int mtk_trm_cpu_utilization(enum core_id core, u32 *cpu_utilization)
{
	struct mailbox_dev *send_mdev;
	struct mailbox_msg msg;
	int ret;

	if (core > CORE_MGMT || !cpu_utilization)
		return -EINVAL;

	if (!mtk_tops_mcu_alive()) {
		TRM_ERR("mcu not alive\n");
		return -EAGAIN;
	}

	memset(&msg, 0, sizeof(struct mailbox_msg));
	msg.msg1 = TRM_CMD_TYPE_CPU_UTILIZATION;

	/* cleared up front so a dropped reply reads as 0% */
	*cpu_utilization = 0;

	/* pick the mailbox channel matching the target core */
	if (core == CORE_MGMT)
		send_mdev = &trm.mgmt_send_mdev;
	else
		send_mdev = &trm.offload_send_mdev[core];

	ret = mbox_send_msg(send_mdev,
			    &msg,
			    cpu_utilization,
			    trm_cpu_utilization_ret_handler);
	if (ret) {
		TRM_ERR("send CPU_UTILIZATION cmd failed(%d)\n", ret);
		return ret;
	}

	return 0;
}
271
/*
 * Dump every enabled region of every registered TRM hardware backend
 * into the relay file system.
 * @rsn: TRM_RSN_* dump reason recorded in each fragment header
 * Returns 0 on success, -EINVAL when the relay fs is not initialized or
 * a region fails its sanity check, or the first __mtk_trm_dump() error.
 */
int mtk_trm_dump(u32 rsn)
{
	/* wall-clock dump timestamp in seconds */
	u64 time = ktime_to_ns(ktime_get_real()) / 1000000000;
	struct trm_hw_config *trm_hw_cfg;
	struct trm_config *trm_cfg;
	int ret = 0;
	u32 i, j;

	if (!mtk_trm_fs_is_init())
		return -EINVAL;

	mutex_lock(&trm_lock);

	/* snapshot MCU core state first so region dumps can append it */
	mtk_trm_mcu_core_dump();

	for (i = 0; i < __TRM_HARDWARE_MAX; i++) {
		trm_hw_cfg = trm_hw_configs[i];
		if (unlikely(!trm_hw_cfg || !trm_hw_cfg->trm_hw_dump))
			continue;

		for (j = 0; j < trm_hw_cfg->cfg_len; j++) {
			trm_cfg = &trm_hw_cfg->trm_cfgs[j];
			if (unlikely(!trm_cfg || !trm_cfg_is_en(trm_cfg)))
				continue;

			if (unlikely(trm_cfg_sanity_check(trm_cfg))) {
				TRM_ERR("trm %s: sanity check fail\n", trm_cfg->name);
				ret = -EINVAL;
				goto out;
			}

			ret = __mtk_trm_dump(trm_hw_cfg, trm_cfg, time, rsn);
			if (ret) {
				TRM_ERR("trm %s: trm dump fail: %d\n",
					trm_cfg->name, ret);
				goto out;
			}
		}
	}

	TRM_NOTICE("TOPS runtime monitor dump\n");

out:
	mutex_unlock(&trm_lock);

	return ret;
}
319
developerfbdb5112023-08-21 15:12:14 +0800320static int mtk_tops_trm_register_mbox(void)
321{
322 int ret;
323 int i;
324
325 ret = register_mbox_dev(MBOX_SEND, &trm.mgmt_send_mdev);
326 if (ret) {
327 TRM_ERR("register trm mgmt mbox send failed: %d\n", ret);
328 return ret;
329 }
330
331 for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
332 ret = register_mbox_dev(MBOX_SEND, &trm.offload_send_mdev[i]);
333 if (ret) {
334 TRM_ERR("register trm offload %d mbox send failed: %d\n",
335 i, ret);
336 goto err_unregister_offload_mbox;
337 }
338 }
339
340 return ret;
341
342err_unregister_offload_mbox:
343 for (i -= 1; i >= 0; i--)
344 unregister_mbox_dev(MBOX_SEND, &trm.offload_send_mdev[i]);
345
346 unregister_mbox_dev(MBOX_SEND, &trm.mgmt_send_mdev);
347
348 return ret;
349}
350
351static void mtk_tops_trm_unregister_mbox(void)
352{
353 int i;
354
355 unregister_mbox_dev(MBOX_SEND, &trm.mgmt_send_mdev);
356
357 for (i = 0; i < CORE_OFFLOAD_NUM; i++)
358 unregister_mbox_dev(MBOX_SEND, &trm.offload_send_mdev[i]);
359}
360
developere5e687d2023-08-08 16:05:33 +0800361int __init mtk_tops_trm_init(void)
362{
developerfbdb5112023-08-21 15:12:14 +0800363 int ret;
364
developere5e687d2023-08-08 16:05:33 +0800365 mutex_init(&trm_lock);
366
developerfbdb5112023-08-21 15:12:14 +0800367 ret = mtk_tops_trm_register_mbox();
368 if (ret)
369 return ret;
370
developere5e687d2023-08-08 16:05:33 +0800371 return mtk_tops_trm_mcu_init();
372}
373
374void __exit mtk_tops_trm_exit(void)
375{
developerfbdb5112023-08-21 15:12:14 +0800376 mtk_tops_trm_unregister_mbox();
377
developere5e687d2023-08-08 16:05:33 +0800378 mtk_tops_trm_mcu_exit();
379}
380
381int mtk_trm_hw_config_register(enum trm_hardware trm_hw,
382 struct trm_hw_config *trm_hw_cfg)
383{
384 if (unlikely(trm_hw >= __TRM_HARDWARE_MAX || !trm_hw_cfg))
385 return -ENODEV;
386
387 if (unlikely(!trm_hw_cfg->cfg_len || !trm_hw_cfg->trm_hw_dump))
388 return -EINVAL;
389
390 if (trm_hw_configs[trm_hw])
391 return -EBUSY;
392
393 trm_hw_configs[trm_hw] = trm_hw_cfg;
394
395 return 0;
396}
397
398void mtk_trm_hw_config_unregister(enum trm_hardware trm_hw,
399 struct trm_hw_config *trm_hw_cfg)
400{
401 if (unlikely(trm_hw >= __TRM_HARDWARE_MAX || !trm_hw_cfg))
402 return;
403
404 if (trm_hw_configs[trm_hw] != trm_hw_cfg)
405 return;
406
407 trm_hw_configs[trm_hw] = NULL;
408}