Note to self: how a call trace is generated in the Linux kernel.
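Anything that calls dump_stack() ends up in the path below: WARN()/BUG(), oops handling, or a driver calling it by hand. As a reminder of how to trigger it, here is a throwaway module of my own (not part of the kernel code quoted below; all names are made up) that dumps the current stack at load time:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>

static int __init trace_demo_init(void)
{
        pr_info("trace_demo: dumping current stack\n");
        dump_stack();           /* enters the call chain traced below */
        return 0;
}

static void __exit trace_demo_exit(void)
{
}

module_init(trace_demo_init);
module_exit(trace_demo_exit);
MODULE_LICENSE("GPL");

After insmod, dmesg shows the "Pid: ..., comm: ..." banner followed by "Call Trace:" and one line per frame.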
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);
        show_trace(NULL, NULL, &stack);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack)
{
        show_trace_log_lvl(task, regs, stack, "");
}

void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, char *log_lvl)
{
        printk("%sCall Trace:\n", log_lvl);
        dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */
void dump_trace(struct task_struct *task, struct pt_regs *regs,
                unsigned long *stack, const struct stacktrace_ops *ops,
                void *data)
{
        const unsigned cpu = get_cpu();
        unsigned long *irq_stack_end =
                (unsigned long *)per_cpu(irq_stack_ptr, cpu);
        unsigned used = 0;
        struct thread_info *tinfo;
        int graph = 0;
        unsigned long bp;

        if (!task)
                task = current;

        if (!stack) {
                unsigned long dummy;
                stack = &dummy;
                if (task && task != current)
                        stack = (unsigned long *)task->thread.sp;
        }

        bp = stack_frame(task, regs);
        /*
         * Print function call entries in all stacks, starting at the
         * current stack address. If the stacks consist of nested
         * exceptions
         */
        tinfo = task_thread_info(task);
        for (;;) {
                char *id;
                unsigned long *estack_end;
                estack_end = in_exception_stack(cpu, (unsigned long)stack,
                                                &used, &id);

                if (estack_end) {
                        if (ops->stack(data, id) < 0)
                                break;

                        bp = ops->walk_stack(tinfo, stack, bp, ops,
                                             data, estack_end, &graph);
                        ops->stack(data, "<EOE>");
                        /*
                         * We link to the next stack via the
                         * second-to-last pointer (index -2 to end) in the
                         * exception stack:
                         */
                        stack = (unsigned long *) estack_end[-2];
                        continue;
                }
                if (irq_stack_end) {
                        unsigned long *irq_stack;
                        irq_stack = irq_stack_end -
                                (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);

                        if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
                                if (ops->stack(data, "IRQ") < 0)
                                        break;
                                bp = ops->walk_stack(tinfo, stack, bp,
                                        ops, data, irq_stack_end, &graph);
                                /*
                                 * We link to the next stack (which would be
                                 * the process stack normally) the last
                                 * pointer (index -1 to end) in the IRQ stack:
                                 */
                                stack = (unsigned long *) (irq_stack_end[-1]);
                                bp = fixup_bp_irq_link(bp, stack, irq_stack,
                                                       irq_stack_end);
                                irq_stack_end = NULL;
                                ops->stack(data, "EOI");
                                continue;
                        }
                }
                break;
        }

        /*
         * This handles the process stack:
         */
        bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
        put_cpu();
}

static const struct stacktrace_ops print_trace_ops = {
        .warning        = print_trace_warning,
        .warning_symbol = print_trace_warning_symbol,
        .stack          = print_trace_stack,
        .address        = print_trace_address,
        .walk_stack     = print_context_stack,
};

unsigned long
print_context_stack(struct thread_info *tinfo,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data,
                unsigned long *end, int *graph)
{
        struct stack_frame *frame = (struct stack_frame *)bp;

        while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
                unsigned long addr;

                addr = *stack;
                if (__kernel_text_address(addr)) {
                        if ((unsigned long) stack == bp + sizeof(long)) {
                                ops->address(data, addr, 1); /* <-- reliable address */
                                frame = frame->next_frame;
                                bp = (unsigned long) frame;
                        } else {
                                ops->address(data, addr, 0); /* <-- unreliable address */
                        }
                        print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
                }
                stack++;
        }
        return bp;
}
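The walk above relies on frame pointers: with CONFIG_FRAME_POINTER every prologue pushes %rbp, so each frame begins with struct stack_frame { next_frame; return_address; } (arch/x86/include/asm/stacktrace.h). That is why the `stack == bp + sizeof(long)` test marks an address reliable: the word sits exactly where a return address must be relative to the current frame, while any other text address found by scanning the stack gets printed with a "? " prefix. A rough user-space sketch of the same walk (my own demo, not kernel code; assumes x86-64 with gcc and frame pointers kept):

/* build: gcc -O0 -fno-omit-frame-pointer walk.c */
#include <stdio.h>

struct stack_frame {
        struct stack_frame *next_frame; /* caller's saved frame pointer */
        unsigned long return_address;   /* return address into the caller */
};

static void walk(void)
{
        struct stack_frame *frame = __builtin_frame_address(0);

        /* Follow the saved-%rbp links, one return address per frame,
         * like the reliable branch of print_context_stack(). The chain
         * ends when the next link is NULL or no longer above us. */
        while (frame && frame->next_frame > frame) {
                printf(" [<%p>]\n", (void *)frame->return_address);
                frame = frame->next_frame;
        }
}

static void c(void) { walk(); }
static void b(void) { c(); }
static void a(void) { b(); }

int main(void)
{
        a();
        return 0;
}

Running it prints the return addresses for walk -> c -> b -> a -> main, innermost first, which is exactly the order a kernel Call Trace appears in.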
/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
        touch_nmi_watchdog();
        printk(data);
        printk_address(addr, reliable);
}

void printk_address(unsigned long address, int reliable)
{
        printk(" [<%p>] %s%pS\n", (void *) address,
                        reliable ? "" : "? ", (void *) address);
}
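The %pS specifier is resolved by vsprintf() through kallsyms into "symbol+offset/size". Roughly (my own illustration; sprint_symbol() is the kallsyms helper, the exact plumbing inside vsprintf is an assumption here), each line could equally be produced like this:

#include <linux/kallsyms.h>
#include <linux/kernel.h>

static void show_one_entry(unsigned long addr, int reliable)
{
        char sym[KSYM_SYMBOL_LEN];

        sprint_symbol(sym, addr);       /* e.g. "schedule+0x3a/0x50" */
        printk(" [<%p>] %s%s\n", (void *)addr, reliable ? "" : "? ", sym);
}

So a reliable entry renders as " [<ffffffff8105a3b0>] schedule+0x3a/0x50" and an unreliable one carries the "? " prefix (addresses here are made up for illustration).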