blob: 9956442fefe6b422fb5b048f9aa039a0acb44aad [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Simon Glassc44b8002013-06-11 11:14:39 -07002/*
3 * Copyright (c) 2012 The Chromium OS Authors.
Simon Glassc44b8002013-06-11 11:14:39 -07004 */
5
6#include <common.h>
Joe Hershberger65b905b2015-03-22 17:08:59 -05007#include <mapmem.h>
Simon Glassc44b8002013-06-11 11:14:39 -07008#include <trace.h>
9#include <asm/io.h>
10#include <asm/sections.h>
11
12DECLARE_GLOBAL_DATA_PTR;
13
/*
 * Tracing state flags. These are placed in .data (not BSS) so they hold
 * valid values even when used before BSS is cleared / after relocation.
 */
static char trace_enabled __attribute__((section(".data")));
static char trace_inited __attribute__((section(".data")));
16
17/* The header block at the start of the trace memory area */
18struct trace_hdr {
19 int func_count; /* Total number of function call sites */
20 u64 call_count; /* Total number of tracked function calls */
21 u64 untracked_count; /* Total number of untracked function calls */
22 int funcs_used; /* Total number of functions used */
23
24 /*
25 * Call count for each function. This is indexed by the word offset
26 * of the function from gd->relocaddr
27 */
28 uintptr_t *call_accum;
29
30 /* Function trace list */
31 struct trace_call *ftrace; /* The function call records */
32 ulong ftrace_size; /* Num. of ftrace records we have space for */
33 ulong ftrace_count; /* Num. of ftrace records written */
34 ulong ftrace_too_deep_count; /* Functions that were too deep */
35
36 int depth;
37 int depth_limit;
38 int max_depth;
39};
40
41static struct trace_hdr *hdr; /* Pointer to start of trace buffer */
42
43static inline uintptr_t __attribute__((no_instrument_function))
44 func_ptr_to_num(void *func_ptr)
45{
46 uintptr_t offset = (uintptr_t)func_ptr;
47
48#ifdef CONFIG_SANDBOX
49 offset -= (uintptr_t)&_init;
50#else
51 if (gd->flags & GD_FLG_RELOC)
52 offset -= gd->relocaddr;
53 else
54 offset -= CONFIG_SYS_TEXT_BASE;
55#endif
56 return offset / FUNC_SITE_SIZE;
57}
58
59static void __attribute__((no_instrument_function)) add_ftrace(void *func_ptr,
60 void *caller, ulong flags)
61{
62 if (hdr->depth > hdr->depth_limit) {
63 hdr->ftrace_too_deep_count++;
64 return;
65 }
66 if (hdr->ftrace_count < hdr->ftrace_size) {
67 struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];
68
69 rec->func = func_ptr_to_num(func_ptr);
70 rec->caller = func_ptr_to_num(caller);
71 rec->flags = flags | (timer_get_us() & FUNCF_TIMESTAMP_MASK);
72 }
73 hdr->ftrace_count++;
74}
75
76static void __attribute__((no_instrument_function)) add_textbase(void)
77{
78 if (hdr->ftrace_count < hdr->ftrace_size) {
79 struct trace_call *rec = &hdr->ftrace[hdr->ftrace_count];
80
81 rec->func = CONFIG_SYS_TEXT_BASE;
82 rec->caller = 0;
83 rec->flags = FUNCF_TEXTBASE;
84 }
85 hdr->ftrace_count++;
86}
87
88/**
89 * This is called on every function entry
90 *
91 * We add to our tally for this function and add to the list of called
92 * functions.
93 *
94 * @param func_ptr Pointer to function being entered
95 * @param caller Pointer to function which called this function
96 */
97void __attribute__((no_instrument_function)) __cyg_profile_func_enter(
98 void *func_ptr, void *caller)
99{
100 if (trace_enabled) {
101 int func;
102
103 add_ftrace(func_ptr, caller, FUNCF_ENTRY);
104 func = func_ptr_to_num(func_ptr);
105 if (func < hdr->func_count) {
106 hdr->call_accum[func]++;
107 hdr->call_count++;
108 } else {
109 hdr->untracked_count++;
110 }
111 hdr->depth++;
112 if (hdr->depth > hdr->depth_limit)
113 hdr->max_depth = hdr->depth;
114 }
115}
116
117/**
118 * This is called on every function exit
119 *
120 * We do nothing here.
121 *
122 * @param func_ptr Pointer to function being entered
123 * @param caller Pointer to function which called this function
124 */
125void __attribute__((no_instrument_function)) __cyg_profile_func_exit(
126 void *func_ptr, void *caller)
127{
128 if (trace_enabled) {
129 add_ftrace(func_ptr, caller, FUNCF_EXIT);
130 hdr->depth--;
131 }
132}
133
134/**
135 * Produce a list of called functions
136 *
137 * The information is written into the supplied buffer - a header followed
138 * by a list of function records.
139 *
140 * @param buff Buffer to place list into
141 * @param buff_size Size of buffer
142 * @param needed Returns size of buffer needed, which may be
143 * greater than buff_size if we ran out of space.
144 * @return 0 if ok, -1 if space was exhausted
145 */
146int trace_list_functions(void *buff, int buff_size, unsigned int *needed)
147{
148 struct trace_output_hdr *output_hdr = NULL;
149 void *end, *ptr = buff;
150 int func;
151 int upto;
152
153 end = buff ? buff + buff_size : NULL;
154
155 /* Place some header information */
156 if (ptr + sizeof(struct trace_output_hdr) < end)
157 output_hdr = ptr;
158 ptr += sizeof(struct trace_output_hdr);
159
160 /* Add information about each function */
161 for (func = upto = 0; func < hdr->func_count; func++) {
162 int calls = hdr->call_accum[func];
163
164 if (!calls)
165 continue;
166
167 if (ptr + sizeof(struct trace_output_func) < end) {
168 struct trace_output_func *stats = ptr;
169
170 stats->offset = func * FUNC_SITE_SIZE;
171 stats->call_count = calls;
172 upto++;
173 }
174 ptr += sizeof(struct trace_output_func);
175 }
176
177 /* Update the header */
178 if (output_hdr) {
179 output_hdr->rec_count = upto;
180 output_hdr->type = TRACE_CHUNK_FUNCS;
181 }
182
183 /* Work out how must of the buffer we used */
184 *needed = ptr - buff;
185 if (ptr > end)
Simon Glassf710d382019-04-08 13:20:50 -0600186 return -ENOSPC;
187
Simon Glassc44b8002013-06-11 11:14:39 -0700188 return 0;
189}
190
191int trace_list_calls(void *buff, int buff_size, unsigned *needed)
192{
193 struct trace_output_hdr *output_hdr = NULL;
194 void *end, *ptr = buff;
195 int rec, upto;
196 int count;
197
198 end = buff ? buff + buff_size : NULL;
199
200 /* Place some header information */
201 if (ptr + sizeof(struct trace_output_hdr) < end)
202 output_hdr = ptr;
203 ptr += sizeof(struct trace_output_hdr);
204
205 /* Add information about each call */
206 count = hdr->ftrace_count;
207 if (count > hdr->ftrace_size)
208 count = hdr->ftrace_size;
209 for (rec = upto = 0; rec < count; rec++) {
210 if (ptr + sizeof(struct trace_call) < end) {
211 struct trace_call *call = &hdr->ftrace[rec];
212 struct trace_call *out = ptr;
213
214 out->func = call->func * FUNC_SITE_SIZE;
215 out->caller = call->caller * FUNC_SITE_SIZE;
216 out->flags = call->flags;
217 upto++;
218 }
219 ptr += sizeof(struct trace_call);
220 }
221
222 /* Update the header */
223 if (output_hdr) {
224 output_hdr->rec_count = upto;
225 output_hdr->type = TRACE_CHUNK_CALLS;
226 }
227
228 /* Work out how must of the buffer we used */
229 *needed = ptr - buff;
230 if (ptr > end)
Simon Glassf710d382019-04-08 13:20:50 -0600231 return -ENOSPC;
232
Simon Glassc44b8002013-06-11 11:14:39 -0700233 return 0;
234}
235
236/* Print basic information about tracing */
237void trace_print_stats(void)
238{
239 ulong count;
240
241#ifndef FTRACE
242 puts("Warning: make U-Boot with FTRACE to enable function instrumenting.\n");
243 puts("You will likely get zeroed data here\n");
244#endif
245 if (!trace_inited) {
246 printf("Trace is disabled\n");
247 return;
248 }
249 print_grouped_ull(hdr->func_count, 10);
250 puts(" function sites\n");
251 print_grouped_ull(hdr->call_count, 10);
252 puts(" function calls\n");
253 print_grouped_ull(hdr->untracked_count, 10);
254 puts(" untracked function calls\n");
255 count = min(hdr->ftrace_count, hdr->ftrace_size);
256 print_grouped_ull(count, 10);
257 puts(" traced function calls");
258 if (hdr->ftrace_count > hdr->ftrace_size) {
259 printf(" (%lu dropped due to overflow)",
260 hdr->ftrace_count - hdr->ftrace_size);
261 }
262 puts("\n");
263 printf("%15d maximum observed call depth\n", hdr->max_depth);
264 printf("%15d call depth limit\n", hdr->depth_limit);
265 print_grouped_ull(hdr->ftrace_too_deep_count, 10);
266 puts(" calls not traced due to depth\n");
267}
268
269void __attribute__((no_instrument_function)) trace_set_enabled(int enabled)
270{
271 trace_enabled = enabled != 0;
272}
273
274/**
275 * Init the tracing system ready for used, and enable it
276 *
277 * @param buff Pointer to trace buffer
278 * @param buff_size Size of trace buffer
279 */
280int __attribute__((no_instrument_function)) trace_init(void *buff,
281 size_t buff_size)
282{
283 ulong func_count = gd->mon_len / FUNC_SITE_SIZE;
284 size_t needed;
285 int was_disabled = !trace_enabled;
286
287 if (!was_disabled) {
288#ifdef CONFIG_TRACE_EARLY
289 char *end;
290 ulong used;
291
292 /*
293 * Copy over the early trace data if we have it. Disable
294 * tracing while we are doing this.
295 */
296 trace_enabled = 0;
297 hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR,
298 CONFIG_TRACE_EARLY_SIZE);
Simon Glass445078e2019-04-08 13:20:52 -0600299 end = (char *)&hdr->ftrace[min(hdr->ftrace_count,
300 hdr->ftrace_size)];
Simon Glassc44b8002013-06-11 11:14:39 -0700301 used = end - (char *)hdr;
302 printf("trace: copying %08lx bytes of early data from %x to %08lx\n",
303 used, CONFIG_TRACE_EARLY_ADDR,
304 (ulong)map_to_sysmem(buff));
305 memcpy(buff, hdr, used);
306#else
307 puts("trace: already enabled\n");
Simon Glassf710d382019-04-08 13:20:50 -0600308 return -EALREADY;
Simon Glassc44b8002013-06-11 11:14:39 -0700309#endif
310 }
311 hdr = (struct trace_hdr *)buff;
312 needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
313 if (needed > buff_size) {
314 printf("trace: buffer size %zd bytes: at least %zd needed\n",
315 buff_size, needed);
Simon Glassf710d382019-04-08 13:20:50 -0600316 return -ENOSPC;
Simon Glassc44b8002013-06-11 11:14:39 -0700317 }
318
319 if (was_disabled)
320 memset(hdr, '\0', needed);
321 hdr->func_count = func_count;
322 hdr->call_accum = (uintptr_t *)(hdr + 1);
323
324 /* Use any remaining space for the timed function trace */
325 hdr->ftrace = (struct trace_call *)(buff + needed);
326 hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
327 add_textbase();
328
329 puts("trace: enabled\n");
330 hdr->depth_limit = 15;
331 trace_enabled = 1;
332 trace_inited = 1;
Simon Glassf710d382019-04-08 13:20:50 -0600333
Simon Glassc44b8002013-06-11 11:14:39 -0700334 return 0;
335}
336
#ifdef CONFIG_TRACE_EARLY
/**
 * Set up early tracing in the dedicated early buffer
 *
 * Repeated calls are harmless: once tracing is on, this is a no-op.
 *
 * @return 0 if ok (or already enabled), -ENOSPC if the early buffer is
 *	too small for the header and call-accumulation table
 */
int __attribute__((no_instrument_function)) trace_early_init(void)
{
	ulong func_count = gd->mon_len / FUNC_SITE_SIZE;
	size_t buff_size = CONFIG_TRACE_EARLY_SIZE;
	size_t needed;

	/* We can ignore additional calls to this function */
	if (trace_enabled)
		return 0;

	hdr = map_sysmem(CONFIG_TRACE_EARLY_ADDR, CONFIG_TRACE_EARLY_SIZE);
	needed = sizeof(*hdr) + func_count * sizeof(uintptr_t);
	if (needed > buff_size) {
		printf("trace: buffer size is %zd bytes, at least %zd needed\n",
		       buff_size, needed);
		return -ENOSPC;
	}

	memset(hdr, '\0', needed);
	hdr->func_count = func_count;
	hdr->call_accum = (uintptr_t *)(hdr + 1);

	/* Use any remaining space for the timed function trace */
	hdr->ftrace = (struct trace_call *)((char *)hdr + needed);
	hdr->ftrace_size = (buff_size - needed) / sizeof(*hdr->ftrace);
	add_textbase();

	/* Allow deep nesting early on, since everything is interesting */
	hdr->depth_limit = 200;
	printf("trace: early enable at %08x\n", CONFIG_TRACE_EARLY_ADDR);

	trace_enabled = 1;

	return 0;
}
#endif