/*
 *  Kernel Probes Jump Optimization (Optprobes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Hitachi Ltd., 2012
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/set_memory.h>

#include "common.h"

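/*
 * An optimized (jump) kprobe replaces the probed instruction site with a
 * 5-byte relative jump: RELATIVEJUMP_OPCODE (0xe9) in the first byte,
 * followed by a 32-bit displacement to the detour buffer.  The four
 * original bytes clobbered by that displacement are kept in
 * op->optinsn.copied_insn (the very first byte is kept in kp->opcode, as
 * for any kprobe), which is what the recovery helper below relies on.
 */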
unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
{
	struct optimized_kprobe *op;
	struct kprobe *kp;
	long offs;
	int i;

	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
		kp = get_kprobe((void *)addr - i);
		/* This function only handles jump-optimized kprobes */
		if (kp && kprobe_optimized(kp)) {
			op = container_of(kp, struct optimized_kprobe, kp);
			/* If op->list is not empty, op is under optimization */
			if (list_empty(&op->list))
				goto found;
		}
	}

	return addr;
found:
	/*
	 * If the kprobe is optimized, the original bytes are overwritten
	 * by the jump destination address.  In this case, the original
	 * bytes must be recovered from the op->optinsn.copied_insn buffer.
	 */
	if (probe_kernel_read(buf, (void *)addr,
			      MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		return 0UL;

	if (addr == (unsigned long)kp->addr) {
		buf[0] = kp->opcode;
		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	} else {
		offs = addr - (unsigned long)kp->addr - 1;
		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
	}

	return (unsigned long)buf;
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;		/* REX.W prefix */
	*addr++ = 0xbf;		/* mov imm64 to %rdi */
#else
	*addr++ = 0xb8;		/* mov imm32 to %eax */
#endif
	*(unsigned long *)addr = val;
}

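/*
 * Example: on x86_64 the two opcode bytes plus the 8-byte immediate above
 * assemble to "movabs $op, %rdi", so the optimized_kprobe pointer arrives
 * in the first argument register when optimized_callback() is called.
 * On 32-bit, 0xb8 + imm32 is "mov $op, %eax" (regparm first argument).
 */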
97 ".global optprobe_template_entry\n"
98 "optprobe_template_entry:\n"
100 /* We don't bother saving the ss register */
105 ".global optprobe_template_val\n"
106 "optprobe_template_val:\n"
109 ".global optprobe_template_call\n"
110 "optprobe_template_call:\n"
112 /* Move flags to rsp */
113 " movq 144(%rsp), %rdx\n"
114 " movq %rdx, 152(%rsp)\n"
116 /* Skip flags entry */
119 #else /* CONFIG_X86_32 */
123 ".global optprobe_template_val\n"
124 "optprobe_template_val:\n"
126 ".global optprobe_template_call\n"
127 "optprobe_template_call:\n"
130 " addl $4, %esp\n" /* skip cs */
133 ".global optprobe_template_end\n"
134 "optprobe_template_end:\n");
#define TMPL_MOVE_IDX \
	((long)&optprobe_template_val - (long)&optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)&optprobe_template_call - (long)&optprobe_template_entry)
#define TMPL_END_IDX \
	((long)&optprobe_template_end - (long)&optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)

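/*
 * Layout of the out-of-line (detour) buffer built by
 * arch_prepare_optimized_kprobe():
 *
 *   +0                        copy of the template above (TMPL_END_IDX bytes)
 *   +TMPL_END_IDX             relocated original instructions (optinsn.size bytes)
 *   +TMPL_END_IDX + size      jmp back to kp.addr + optinsn.size
 *
 * setup_detour_execution() enters the buffer at +TMPL_END_IDX, skipping the
 * template, because the breakpoint path has already run the pre handler.
 */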
/* Optimized kprobe callback function: called from optinsn */
static void
optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Save skipped registers */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);

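/*
 * Note: the pt_regs passed to optimized_callback() is built on the stack by
 * the optprobe template; the register-saving macro leaves the cs, ip and
 * orig_ax slots unset, so they are filled in above to make the frame look
 * like an int3 trap at kp->addr before opt_pre_handler() runs.
 */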
static int copy_optimized_instructions(u8 *dest, u8 *src)
{
	struct insn insn;
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len, &insn);
		if (!ret || !can_boost(&insn, src + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}

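/*
 * The loop above copies whole instructions until at least RELATIVEJUMP_SIZE
 * bytes have been relocated, because the 5-byte jump planted at the probe
 * address must not cut an instruction in half; every copied instruction
 * also has to be "boostable", i.e. safe to execute out of line.
 */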
/* Check whether insn is an indirect jump */
static int insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}

/* Check whether insn jumps into the specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}

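/*
 * Example: for a 2-byte short conditional jump (0x7x rel8) decoded at
 * kaddr, insn->next_byte is kaddr + 2 and insn->immediate.value is the
 * sign-extended 8-bit displacement, so target is the absolute jump
 * destination that is checked against [start, start + len].
 */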
/* Decode the whole function to ensure no instruction jumps into the target */
static int can_optimize(unsigned long paddr)
{
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling.
	 */
	if ((paddr >= (unsigned long)__entry_text_start) &&
	    (paddr <  (unsigned long)__entry_text_end))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		unsigned long recovered_insn;
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code may jump into this function,
			 * we can't optimize a kprobe in this function.
			 */
			return 0;
		recovered_insn = recover_probed_instruction(buf, addr);
		if (!recovered_insn)
			return 0;
		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
		insn_get_length(&insn);
		/* Another subsystem has put a breakpoint there */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
			return 0;
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check that the instruction doesn't jump into the target */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}

/* Check whether the optimized_kprobe can actually be optimized. */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check whether the addr is within the optimized instructions. */
int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}

/* Free an optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}

/*
 * Copy the replaced target instructions.
 * Target instructions MUST be relocatable (checked inside).
 * This is called when a new aggr(opt)probe is allocated or reused.
 */
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
				  struct kprobe *__unused)
{
	u8 *buf;
	int ret;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	op->optinsn.insn = get_optinsn_slot();
	if (!op->optinsn.insn)
		return -ENOMEM;

	/*
	 * Verify that the address gap is within the 2GB range, because
	 * this uses a relative jump.
	 */
	rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
	if (abs(rel) > 0x7fffffff) {
		__arch_remove_optimized_kprobe(op, 0);
		return -ERANGE;
	}
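	/*
	 * The displacement planted by arch_optimize_kprobes() is a signed
	 * 32-bit value measured from the end of the 5-byte jmp, so the
	 * detour buffer is only usable if it lies within roughly +/-2GB of
	 * the probed address; anything farther away cannot be encoded.
	 */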
	buf = (u8 *)op->optinsn.insn;
	set_memory_rw((unsigned long)buf & PAGE_MASK, 1);

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
	if (ret < 0) {
		__arch_remove_optimized_kprobe(op, 0);
		return ret;
	}
	op->optinsn.size = ret;

	/* Copy the arch-dependent instance from the template */
	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);

	/* Set probe information (the first argument of optimized_callback) */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set the probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);

	/* Set a returning jmp instruction at the tail of the out-of-line buffer */
	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
			   (u8 *)op->kp.addr + op->optinsn.size);

	set_memory_ro((unsigned long)buf & PAGE_MASK, 1);

	flush_icache_range((unsigned long)buf,
			   (unsigned long)buf + TMPL_END_IDX +
			   op->optinsn.size + RELATIVEJUMP_SIZE);
	return 0;
}

/*
 * Replace breakpoints (int3) with relative jumps.
 * Callers must hold kprobe_mutex and text_mutex.
 */
void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	u8 insn_buf[RELATIVEJUMP_SIZE];

	list_for_each_entry_safe(op, tmp, oplist, list) {
		s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

		WARN_ON(kprobe_disabled(&op->kp));

		/* Back up the instructions which will be replaced by the jump address */
		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
		       RELATIVE_ADDR_SIZE);

		insn_buf[0] = RELATIVEJUMP_OPCODE;
		*(s32 *)(&insn_buf[1]) = rel;

		text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
			     op->optinsn.insn);

		list_del_init(&op->list);
	}
}

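/*
 * After the text_poke_bp() above, the probe address holds the 5 bytes
 * "e9 <rel32>", i.e. a near jump to op->optinsn.insn, and the bytes it
 * clobbered remain available in op->optinsn.copied_insn for instruction
 * recovery and for unoptimizing.
 */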
/* Replace a relative jump with a breakpoint (int3). */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 insn_buf[RELATIVEJUMP_SIZE];

	/* Set int3 as the first byte for kprobes */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
		     op->optinsn.insn);
}

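/*
 * Unoptimizing thus falls back to a regular breakpoint-based kprobe: the
 * int3 goes back into the first byte and the four displacement bytes are
 * replaced with the original instruction bytes saved at optimize time.
 */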
/*
 * Recover original instructions and breakpoints from relative jumps.
 * Callers must hold kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe can really run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(setup_detour_execution);

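/*
 * Note: preempt_enable_no_resched() above pairs with the preemption
 * disabling done earlier on the kprobe int3 path; since execution resumes
 * in the detour buffer instead of going through the single-step code, the
 * preempt count has to be dropped here before the exception returns.
 */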