Lines Matching +full:early +full:- +full:to +full:- +full:mid

1 // SPDX-License-Identifier: GPL-2.0-only
20 if (state->task == current && !state->error) { \
59 state->stack_info.type, state->stack_info.next_sp, in unwind_dump()
60 state->stack_mask, state->graph_idx); in unwind_dump()
64 if (get_stack_info(sp, state->task, &stack_info, &visit_mask)) in unwind_dump()
87 int *last = ip_table + num_entries - 1; in __orc_find()
88 int *mid, *found = first; in __orc_find() local
94 * Do a binary range search to find the rightmost duplicate of a given in __orc_find()
100 mid = first + ((last - first) / 2); in __orc_find()
102 if (orc_ip(mid) <= ip) { in __orc_find()
103 found = mid; in __orc_find()
104 first = mid + 1; in __orc_find()
106 last = mid - 1; in __orc_find()
109 return u_table + (found - ip_table); in __orc_find()
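The fragment above (lines 87-109) is the heart of __orc_find(): the ORC IP table is sorted, and because entries can be duplicated or describe ranges, the search must return the rightmost entry whose address is still <= the target ip. A self-contained sketch of that search over a plain sorted array, with illustrative names (find_covering_entry is not a kernel function):

    #include <stddef.h>

    /*
     * Find the rightmost element of the sorted array addrs[0..n-1] that is
     * still <= ip; return its index, or -1 if every element is greater.
     * Mirrors the binary range search used by __orc_find().
     */
    static long find_covering_entry(const unsigned long *addrs, size_t n,
                                    unsigned long ip)
    {
            size_t first = 0, last = n;        /* half-open range [first, last) */
            long found = -1;

            while (first < last) {
                    size_t mid = first + (last - first) / 2;

                    if (addrs[mid] <= ip) {
                            found = (long)mid; /* candidate; keep looking right */
                            first = mid + 1;
                    } else {
                            last = mid;        /* mid and everything above is too big */
                    }
            }
            return found;
    }

The kernel's version additionally translates the matching slot in the IP table back into the parallel orc_unwind table (line 109), since the two tables are index-aligned.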
118 if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip) in orc_module_find()
120 return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, in orc_module_find()
121 mod->arch.num_orcs, ip); in orc_module_find()
139 * ftrace function that was used to create it, and use that ftrace
152 /* Set tramp_addr to the start of the code copied by the trampoline */ in orc_ftrace_find()
153 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) in orc_ftrace_find()
158 /* Now advance tramp_addr to the location within the trampoline that ip points to */ in orc_ftrace_find()
159 offset = ip - ops->trampoline; in orc_ftrace_find()
179 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
189 /* Fake frame pointer entry -- used as a fallback for generated code */
195 .bp_offset = -16,
205 /* For non-init vmlinux addresses, use the fast lookup table: */ in orc_find()
209 idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; in orc_find()
211 if (unlikely((idx >= lookup_num_blocks-1))) { in orc_find()
228 __start_orc_unwind + start, stop - start, ip); in orc_find()
234 __stop_orc_unwind_ip - __start_orc_unwind_ip, ip); in orc_find()
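Lines 205-228 show the fast path of orc_find(): for non-init vmlinux addresses (line 205), a per-block lookup table bounds the binary search to a small slice of the ORC tables, while line 234 shows the fallback that searches the entire table. A rough sketch of how a block index turns into a [start, stop) slice, consistent with the visible lines but using an illustrative helper name:

    /*
     * Sketch only: one lookup slot per block of text.  The slot for ip's
     * block and the next slot bound the candidate ORC entries, so
     * __orc_find() only has to scan that slice.
     */
    static void orc_lookup_slice(const unsigned int *lookup,
                                 unsigned long lookup_start_ip,
                                 unsigned long block_size, unsigned long ip,
                                 unsigned int *start, unsigned int *stop)
    {
            unsigned int idx = (ip - lookup_start_ip) / block_size;

            *start = lookup[idx];
            *stop  = lookup[idx + 1] + 1;   /* one past the last candidate entry */
    }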
254 int delta = _b - _a; in orc_sort_swap()
259 *b = tmp - delta; in orc_sort_swap()
262 orc_a = cur_orc_table + (a - cur_orc_ip_table); in orc_sort_swap()
263 orc_b = cur_orc_table + (b - cur_orc_ip_table); in orc_sort_swap()
277 return -1; in orc_sort_cmp()
280 * The "weak" section terminator entries need to always be first in orc_sort_cmp()
281 * to ensure the lookup code skips them in favor of real entries. in orc_sort_cmp()
282 * These terminator entries exist to handle any gaps created by in orc_sort_cmp()
285 orc_a = cur_orc_table + (a - cur_orc_ip_table); in orc_sort_cmp()
286 return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1; in orc_sort_cmp()
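The tie-break at line 286 only makes sense together with the rightmost-match search above: when a weak terminator and a real ORC entry share the same IP, the terminator must compare as smaller so the real entry ends up to its right and wins the lookup. A minimal illustration of that ordering rule with simplified types (not the kernel's structures):

    struct demo_entry {
            unsigned long ip;
            int is_terminator;      /* stands in for type == ORC_TYPE_UNDEFINED */
    };

    /* qsort()-style comparator: sort by ip; on ties, terminators sort first. */
    static int demo_cmp(const void *_a, const void *_b)
    {
            const struct demo_entry *a = _a, *b = _b;

            if (a->ip != b->ip)
                    return a->ip < b->ip ? -1 : 1;
            if (a->is_terminator == b->is_terminator)
                    return 0;
            return a->is_terminator ? -1 : 1;
    }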
301 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to in unwind_module_init()
311 mod->arch.orc_unwind_ip = orc_ip; in unwind_module_init()
312 mod->arch.orc_unwind = orc; in unwind_module_init()
313 mod->arch.num_orcs = num_entries; in unwind_module_init()
319 size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip; in unwind_init()
320 size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind; in unwind_init()
335 * It's ready for binary search straight away, no need to sort it. in unwind_init()
339 lookup_num_blocks = orc_lookup_end - orc_lookup; in unwind_init()
340 for (i = 0; i < lookup_num_blocks-1; i++) { in unwind_init()
349 orc_lookup[i] = orc - __start_orc_unwind; in unwind_init()
359 orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind; in unwind_init()
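Lines 339-359 build the lookup table that orc_find() consumes: for each block of text, unwind_init() runs one full-table binary search for the block's start address and stores the resulting entry index, with the final slot covering the end of the text range (the kernel's version also guards against a failed lookup). Sketched in terms of the illustrative find_covering_entry() helper shown earlier:

    #include <stddef.h>

    /*
     * Sketch of the boot-time construction: one slot per block of text, each
     * holding the index of the ORC entry covering the block's start, plus a
     * final slot for the end of the text range.
     */
    static void build_lookup_table(unsigned int *lookup, size_t nr_blocks,
                                   unsigned long text_start, unsigned long text_end,
                                   unsigned long block_size,
                                   const unsigned long *ip_table, size_t num_entries)
    {
            for (size_t i = 0; i + 1 < nr_blocks; i++) {
                    long idx = find_covering_entry(ip_table, num_entries,
                                                   text_start + i * block_size);

                    lookup[i] = (idx < 0) ? 0 : (unsigned int)idx;
            }

            /* Last slot bounds lookups for addresses near the end of the range: */
            lookup[nr_blocks - 1] = (unsigned int)find_covering_entry(ip_table,
                                                                      num_entries,
                                                                      text_end);
    }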
369 return __kernel_text_address(state->ip) ? state->ip : 0; in unwind_get_return_address()
378 if (state->regs) in unwind_get_return_address_ptr()
379 return &state->regs->ip; in unwind_get_return_address_ptr()
381 if (state->sp) in unwind_get_return_address_ptr()
382 return (unsigned long *)state->sp - 1; in unwind_get_return_address_ptr()
390 struct stack_info *info = &state->stack_info; in stack_access_ok()
396 return !get_stack_info(addr, state->task, info, &state->stack_mask) && in stack_access_ok()
415 /* x86-32 support will be more complicated due to the &regs->sp hack */ in deref_stack_regs()
421 *ip = READ_ONCE_NOCHECK(regs->ip); in deref_stack_regs()
422 *sp = READ_ONCE_NOCHECK(regs->sp); in deref_stack_regs()
429 struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET; in deref_stack_iret_regs()
434 *ip = READ_ONCE_NOCHECK(regs->ip); in deref_stack_iret_regs()
435 *sp = READ_ONCE_NOCHECK(regs->sp); in deref_stack_iret_regs()
440 * If state->regs is non-NULL, and points to a full pt_regs, just get the reg
441 * value from state->regs.
443 * Otherwise, if state->regs just points to IRET regs, and the previous frame
444 * had full regs, it's safe to get the value from the previous regs. This can
445 * happen when early/late IRQ entry code gets interrupted by an NMI.
452 if (!state->regs) in get_reg()
455 if (state->full_regs) { in get_reg()
456 *val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]); in get_reg()
460 if (state->prev_regs) { in get_reg()
461 *val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]); in get_reg()
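Assembling the visible pieces of get_reg() (lines 452-461) with the comment above them: a register value is only trusted when it comes from a full pt_regs save, either in the current frame or, for an IRET-only frame, in the previous frame's saved regs; anything else fails the unwind step. A condensed reconstruction under that assumption (the exact body may differ slightly from the source):

    static bool get_reg(struct unwind_state *state, unsigned int reg,
                        unsigned long *val)
    {
            if (!state->regs)
                    return false;           /* no register frame at all */

            if (state->full_regs) {         /* current frame saved a full pt_regs */
                    *val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
                    return true;
            }

            if (state->prev_regs) {         /* IRET frame; previous frame had full regs */
                    *val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
                    return true;
            }

            return false;
    }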
470 unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp; in unwind_next_frame()
471 enum stack_type prev_type = state->stack_info.type; in unwind_next_frame()
481 /* End-of-stack check for user tasks: */ in unwind_next_frame()
482 if (state->regs && user_mode(state->regs)) in unwind_next_frame()
488 * For a call frame (as opposed to a signal frame), state->ip points to in unwind_next_frame()
491 * if the call was to a noreturn function. So get the ORC data for the in unwind_next_frame()
494 orc = orc_find(state->signal ? state->ip : state->ip - 1); in unwind_next_frame()
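The ternary at line 494 encodes a subtle rule: for an ordinary call frame, state->ip is a return address, i.e. the byte after the CALL, and that address may belong to a different ORC entry (or to none at all, after a call to a noreturn function), so the lookup uses ip - 1 to land inside the CALL; signal and exception frames saved the faulting ip exactly and are looked up as-is. Written out as a tiny hypothetical helper (not in the kernel):

    /* Pick the address to feed to orc_find() for the current frame. */
    static unsigned long orc_lookup_addr(unsigned long ip, bool signal_frame)
    {
            return signal_frame ? ip : ip - 1;
    }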
497 * As a fallback, try to assume this code uses a frame pointer. in unwind_next_frame()
503 state->error = true; in unwind_next_frame()
505 if (orc->type == ORC_TYPE_UNDEFINED) in unwind_next_frame()
508 if (orc->type == ORC_TYPE_END_OF_STACK) in unwind_next_frame()
512 state->signal = orc->signal; in unwind_next_frame()
515 switch (orc->sp_reg) { in unwind_next_frame()
517 sp = state->sp + orc->sp_offset; in unwind_next_frame()
521 sp = state->bp + orc->sp_offset; in unwind_next_frame()
525 sp = state->sp; in unwind_next_frame()
530 sp = state->bp + orc->sp_offset; in unwind_next_frame()
537 (void *)state->ip); in unwind_next_frame()
545 (void *)state->ip); in unwind_next_frame()
553 (void *)state->ip); in unwind_next_frame()
561 (void *)state->ip); in unwind_next_frame()
568 orc->sp_reg, (void *)state->ip); in unwind_next_frame()
576 if (orc->sp_reg == ORC_REG_SP_INDIRECT) in unwind_next_frame()
577 sp += orc->sp_offset; in unwind_next_frame()
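Lines 515-577 are the first half of each unwind step: the ORC entry names a base register (orc->sp_reg) and a byte offset, and the previous frame's stack pointer is base + sp_offset, with the *_INDIRECT variants additionally reading the value through the stack (which is what lines 576-577 finish for ORC_REG_SP_INDIRECT). Reduced to the two direct cases visible above, as an illustrative helper that assumes the kernel's orc_types.h definitions:

    /* Illustrative only: the direct sp_reg cases from the switch above. */
    static bool orc_prev_sp(const struct orc_entry *orc,
                            unsigned long cur_sp, unsigned long cur_bp,
                            unsigned long *prev_sp)
    {
            switch (orc->sp_reg) {
            case ORC_REG_SP:
                    *prev_sp = cur_sp + orc->sp_offset;     /* line 517 */
                    return true;
            case ORC_REG_BP:
                    *prev_sp = cur_bp + orc->sp_offset;     /* line 521 */
                    return true;
            default:
                    /* indirect and other-register cases need stack or reg reads */
                    return false;
            }
    }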
581 switch (orc->type) { in unwind_next_frame()
583 ip_p = sp - sizeof(long); in unwind_next_frame()
585 if (!deref_stack_reg(state, ip_p, &state->ip)) in unwind_next_frame()
588 state->ip = unwind_recover_ret_addr(state, state->ip, in unwind_next_frame()
590 state->sp = sp; in unwind_next_frame()
591 state->regs = NULL; in unwind_next_frame()
592 state->prev_regs = NULL; in unwind_next_frame()
596 if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) { in unwind_next_frame()
602 * There is a small chance of an interrupt hitting right at the entry of in unwind_next_frame()
604 * That point is right after the RET to arch_rethook_trampoline() in unwind_next_frame()
607 * (this has to point to the address of the stack entry storing in unwind_next_frame()
608 * the modified return address) must be "SP - (a stack entry)" in unwind_next_frame()
611 state->ip = unwind_recover_rethook(state, state->ip, in unwind_next_frame()
612 (unsigned long *)(state->sp - sizeof(long))); in unwind_next_frame()
613 state->regs = (struct pt_regs *)sp; in unwind_next_frame()
614 state->prev_regs = NULL; in unwind_next_frame()
615 state->full_regs = true; in unwind_next_frame()
619 if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) { in unwind_next_frame()
625 state->ip = unwind_recover_rethook(state, state->ip, in unwind_next_frame()
626 (unsigned long *)(state->sp - sizeof(long))); in unwind_next_frame()
628 if (state->full_regs) in unwind_next_frame()
629 state->prev_regs = state->regs; in unwind_next_frame()
630 state->regs = (void *)sp - IRET_FRAME_OFFSET; in unwind_next_frame()
631 state->full_regs = false; in unwind_next_frame()
636 orc->type, (void *)orig_ip); in unwind_next_frame()
641 switch (orc->bp_reg) { in unwind_next_frame()
644 state->bp = tmp; in unwind_next_frame()
648 if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp)) in unwind_next_frame()
653 if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp)) in unwind_next_frame()
659 orc->bp_reg, (void *)orig_ip); in unwind_next_frame()
663 /* Prevent a recursive loop due to bad ORC data: */ in unwind_next_frame()
664 if (state->stack_info.type == prev_type && in unwind_next_frame()
665 on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) && in unwind_next_frame()
666 state->sp <= prev_sp) { in unwind_next_frame()
676 state->error = true; in unwind_next_frame()
680 state->stack_info.type = STACK_TYPE_UNKNOWN; in unwind_next_frame()
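The check quoted at lines 663-666 is the unwinder's defence against corrupt ORC data: a step that stays on the same stack without moving the stack pointer upward can only loop, so it is treated as an error (lines 676-680 then flag the error and mark the stack type unknown). The core condition, written as a stand-alone predicate for clarity (illustrative; the kernel additionally verifies that the new sp lies on the current stack):

    /* True if the step made progress and the walk may safely continue. */
    static bool orc_step_made_progress(enum stack_type prev_type, unsigned long prev_sp,
                                       enum stack_type new_type, unsigned long new_sp)
    {
            return new_type != prev_type || new_sp > prev_sp;
    }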
689 state->task = task; in __unwind_start()
695 * Refuse to unwind the stack of a task while it's executing on another in __unwind_start()
697 * checks to prevent it from going off the rails. in __unwind_start()
706 state->ip = regs->ip; in __unwind_start()
707 state->sp = regs->sp; in __unwind_start()
708 state->bp = regs->bp; in __unwind_start()
709 state->regs = regs; in __unwind_start()
710 state->full_regs = true; in __unwind_start()
711 state->signal = true; in __unwind_start()
717 : "=r" (state->ip), "=r" (state->sp), in __unwind_start()
718 "=r" (state->bp)); in __unwind_start()
721 struct inactive_task_frame *frame = (void *)task->thread.sp; in __unwind_start()
723 state->sp = task->thread.sp + sizeof(*frame); in __unwind_start()
724 state->bp = READ_ONCE_NOCHECK(frame->bp); in __unwind_start()
725 state->ip = READ_ONCE_NOCHECK(frame->ret_addr); in __unwind_start()
726 state->signal = (void *)state->ip == ret_from_fork_asm; in __unwind_start()
729 if (get_stack_info((unsigned long *)state->sp, state->task, in __unwind_start()
730 &state->stack_info, &state->stack_mask)) { in __unwind_start()
737 void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp); in __unwind_start()
738 state->error = true; in __unwind_start()
739 if (get_stack_info(next_page, state->task, &state->stack_info, in __unwind_start()
740 &state->stack_mask)) in __unwind_start()
746 * (first_frame) or indirectly (regs->sp) to indicate which stack frame in __unwind_start()
747 * to start unwinding at. Skip ahead until we reach it. in __unwind_start()
756 /* Otherwise, skip ahead to the user-specified starting frame: */ in __unwind_start()
758 (!on_stack(&state->stack_info, first_frame, sizeof(long)) || in __unwind_start()
759 state->sp <= (unsigned long)first_frame)) in __unwind_start()
765 state->error = true; in __unwind_start()
767 state->stack_info.type = STACK_TYPE_UNKNOWN; in __unwind_start()
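The first_frame handling at lines 746-759 exists because callers drive the unwinder as a simple loop and usually want to hide their own frames, so __unwind_start() skips ahead until the state reaches the caller-supplied frame. A typical consumer loop looks roughly like this (an illustrative fragment using the unwind_start/unwind_done/unwind_next_frame API this file implements):

    struct unwind_state state;
    unsigned long addr;

    for (unwind_start(&state, task, regs, first_frame);
         !unwind_done(&state);
         unwind_next_frame(&state)) {
            addr = unwind_get_return_address(&state);
            if (!addr)
                    break;
            /* record or print addr ... */
    }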