[karo-tx-linux.git] / kernel / uprobes.c
1 /*
2  * Userspace Probes (UProbes)
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17  *
18  * Copyright (C) IBM Corporation, 2008-2011
19  * Authors:
20  *      Srikar Dronamraju
21  *      Jim Keniston
22  */
23
24 #include <linux/kernel.h>
25 #include <linux/highmem.h>
26 #include <linux/pagemap.h>      /* read_mapping_page */
27 #include <linux/slab.h>
28 #include <linux/sched.h>
29 #include <linux/rmap.h>         /* anon_vma_prepare */
30 #include <linux/mmu_notifier.h> /* set_pte_at_notify */
31 #include <linux/swap.h>         /* try_to_free_swap */
32 #include <linux/ptrace.h>       /* user_enable_single_step */
33 #include <linux/kdebug.h>       /* notifier mechanism */
34 #include <linux/uprobes.h>
35
36 #define UINSNS_PER_PAGE (PAGE_SIZE/UPROBES_XOL_SLOT_BYTES)
37 #define MAX_UPROBES_XOL_SLOTS UINSNS_PER_PAGE
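/*
 * Rough illustration (exact values are arch-defined): with 4KiB pages and,
 * say, 128-byte XOL slots, UINSNS_PER_PAGE works out to 32 out-of-line
 * single-step slots per mm.
 */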
38
39 static bulkref_t uprobes_srcu;
40 static struct rb_root uprobes_tree = RB_ROOT;
41 static DEFINE_SPINLOCK(uprobes_treelock);       /* serialize rbtree access */
42
43 #define UPROBES_HASH_SZ 13
44 /* serialize (un)register */
45 static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
46 #define uprobes_hash(v) (&uprobes_mutex[((unsigned long)(v)) %\
47                                                 UPROBES_HASH_SZ])
48
49 /* serialize uprobe->pending_list */
50 static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
51 #define uprobes_mmap_hash(v)    (&uprobes_mmap_mutex[((unsigned long)(v)) %\
52                                                 UPROBES_HASH_SZ])
53
54 /*
55  * uprobe_events allows us to skip the mmap_uprobe if there are no uprobe
56  * events active at this time.  A fine-grained per-inode count would
57  * probably be better.
58  */
59 static atomic_t uprobe_events = ATOMIC_INIT(0);
60
61 /*
62  * Maintain temporary per-vma info that can be used to check whether a vma
63  * has already been handled. This structure is introduced since extending
64  * vm_area_struct wasn't recommended.
65  */
66 struct vma_info {
67         struct list_head probe_list;
68         struct mm_struct *mm;
69         loff_t vaddr;
70 };
71
72 /*
73  * valid_vma: Verify if the specified vma is an executable vma.
74  * Relax restrictions while unregistering: vm_flags might have
75  * changed after the breakpoint was inserted.
76  *      - is_reg: indicates if we are in register context.
77  *      - Return true if the specified vma is a file-backed,
78  *        executable vma.
79  */
80 static bool valid_vma(struct vm_area_struct *vma, bool is_reg)
81 {
82         if (!vma->vm_file)
83                 return false;
84
85         if (!is_reg)
86                 return true;
87
88         if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) ==
89                                                 (VM_READ|VM_EXEC))
90                 return true;
91
92         return false;
93 }
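/*
 * In practice this accepts the usual read/execute, non-shared text mapping
 * when registering, and rejects writable or shared file mappings so that a
 * breakpoint can never end up being written back to the file.
 */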
94
95 /**
96  * __replace_page - replace page in vma by new page.
97  * based on replace_page in mm/ksm.c
98  *
99  * @vma:      vma that holds the pte pointing to page
100  * @page:     the COWed page we are replacing with @kpage
101  * @kpage:    the modified page that replaces @page
102  *
103  * Returns 0 on success, -EFAULT on failure.
104  */
105 static int __replace_page(struct vm_area_struct *vma, struct page *page,
106                                         struct page *kpage)
107 {
108         struct mm_struct *mm = vma->vm_mm;
109         pgd_t *pgd;
110         pud_t *pud;
111         pmd_t *pmd;
112         pte_t *ptep;
113         spinlock_t *ptl;
114         unsigned long addr;
115         int err = -EFAULT;
116
117         addr = page_address_in_vma(page, vma);
118         if (addr == -EFAULT)
119                 goto out;
120
121         pgd = pgd_offset(mm, addr);
122         if (!pgd_present(*pgd))
123                 goto out;
124
125         pud = pud_offset(pgd, addr);
126         if (!pud_present(*pud))
127                 goto out;
128
129         pmd = pmd_offset(pud, addr);
130         if (!pmd_present(*pmd))
131                 goto out;
132
133         ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
134         if (!ptep)
135                 goto out;
136
137         get_page(kpage);
138         page_add_new_anon_rmap(kpage, vma, addr);
139
140         flush_cache_page(vma, addr, pte_pfn(*ptep));
141         ptep_clear_flush(vma, addr, ptep);
142         set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
143
144         page_remove_rmap(page);
145         if (!page_mapped(page))
146                 try_to_free_swap(page);
147         put_page(page);
148         pte_unmap_unlock(ptep, ptl);
149         err = 0;
150
151 out:
152         return err;
153 }
154
155 /*
156  * NOTE:
157  * The breakpoint instruction is expected to be the smallest-size instruction
158  * for the architecture. If an arch has variable-length instructions and the
159  * breakpoint instruction is not the smallest-length instruction
160  * supported by that architecture, then we need to modify read_opcode /
161  * write_opcode accordingly. This would never be a problem for archs that
162  * have fixed-length instructions.
163  */
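/*
 * For example, on x86 the breakpoint instruction is the single-byte int3
 * opcode (0xcc), which trivially satisfies the assumption above; an
 * architecture whose breakpoint encoding is longer than its smallest
 * instruction would need the special handling described in the NOTE.
 */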
164
165 /*
166  * write_opcode - write the opcode at a given virtual address.
167  * @mm: the probed process address space.
168  * @uprobe: the breakpointing information.
169  * @vaddr: the virtual address to store the opcode.
170  * @opcode: opcode to be written at @vaddr.
171  *
172  * Called with mm->mmap_sem held (for read and with a reference to
173  * mm).
174  *
175  * For mm @mm, write the opcode at @vaddr.
176  * Return 0 (success) or a negative errno.
177  */
178 static int write_opcode(struct mm_struct *mm, struct uprobe *uprobe,
179                         unsigned long vaddr, uprobe_opcode_t opcode)
180 {
181         struct page *old_page, *new_page;
182         struct address_space *mapping;
183         void *vaddr_old, *vaddr_new;
184         struct vm_area_struct *vma;
185         unsigned long addr;
186         int ret;
187
188         /* Read the page with vaddr into memory */
189         ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
190         if (ret <= 0)
191                 return ret;
192         ret = -EINVAL;
193
194         /*
195          * We are interested in text pages only. Our pages of interest
196          * should be mapped for read and execute only. We desist from
197          * adding probes in write mapped pages since the breakpoints
198          * might end up in the file copy.
199          */
200         if (!valid_vma(vma, opcode == UPROBES_BKPT_INSN))
201                 goto put_out;
202
203         mapping = uprobe->inode->i_mapping;
204         if (mapping != vma->vm_file->f_mapping)
205                 goto put_out;
206
207         addr = vma->vm_start + uprobe->offset;
208         addr -= vma->vm_pgoff << PAGE_SHIFT;
209         if (vaddr != (unsigned long)addr)
210                 goto put_out;
211
212         ret = -ENOMEM;
213         new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
214         if (!new_page)
215                 goto put_out;
216
217         __SetPageUptodate(new_page);
218
219         /*
220          * lock page will serialize against do_wp_page()'s
221          * PageAnon() handling
222          */
223         lock_page(old_page);
224         /* copy the page now that we've got it stable */
225         vaddr_old = kmap_atomic(old_page);
226         vaddr_new = kmap_atomic(new_page);
227
228         memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
229         /* poke the new insn in, ASSUMES we don't cross page boundary */
230         vaddr &= ~PAGE_MASK;
231         memcpy(vaddr_new + vaddr, &opcode, uprobe_opcode_sz);
232
233         kunmap_atomic(vaddr_new);
234         kunmap_atomic(vaddr_old);
235
236         ret = anon_vma_prepare(vma);
237         if (ret)
238                 goto unlock_out;
239
240         lock_page(new_page);
241         ret = __replace_page(vma, old_page, new_page);
242         unlock_page(new_page);
243
244 unlock_out:
245         unlock_page(old_page);
246         page_cache_release(new_page);
247
248 put_out:
249         put_page(old_page);     /* we did a get_page in the beginning */
250         return ret;
251 }
252
253 /**
254  * read_opcode - read the opcode at a given virtual address.
255  * @mm: the probed process address space.
256  * @vaddr: the virtual address to read the opcode.
257  * @opcode: location to store the read opcode.
258  *
259  * Called with mm->mmap_sem held (for read and with a reference to
260  * mm).
261  *
262  * For mm @mm, read the opcode at @vaddr and store it in @opcode.
263  * Return 0 (success) or a negative errno.
264  */
265 static int read_opcode(struct mm_struct *mm, unsigned long vaddr,
266                                                 uprobe_opcode_t *opcode)
267 {
268         struct page *page;
269         void *vaddr_new;
270         int ret;
271
272         ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
273         if (ret <= 0)
274                 return ret;
275
276         lock_page(page);
277         vaddr_new = kmap_atomic(page);
278         vaddr &= ~PAGE_MASK;
279         memcpy(opcode, vaddr_new + vaddr, uprobe_opcode_sz);
280         kunmap_atomic(vaddr_new);
281         unlock_page(page);
282         put_page(page);         /* we did a get_user_pages in the beginning */
283         return 0;
284 }
285
286 /**
287  * set_bkpt - store breakpoint at a given address.
288  * @mm: the probed process address space.
289  * @uprobe: the probepoint information.
290  * @vaddr: the virtual address to insert the opcode.
291  *
292  * For mm @mm, store the breakpoint instruction at @vaddr.
293  * Return 0 (success) or a negative errno.
294  */
295 int __weak set_bkpt(struct mm_struct *mm, struct uprobe *uprobe,
296                                                 unsigned long vaddr)
297 {
298         return write_opcode(mm, uprobe, vaddr, UPROBES_BKPT_INSN);
299 }
300
301 /**
302  * set_orig_insn - Restore the original instruction.
303  * @mm: the probed process address space.
304  * @uprobe: the probepoint information.
305  * @vaddr: the virtual address to insert the opcode.
306  * @verify: if true, verify existence of breakpoint instruction.
307  *
308  * For mm @mm, restore the original opcode (opcode) at @vaddr.
309  * Return 0 (success) or a negative errno.
310  */
311 int __weak set_orig_insn(struct mm_struct *mm, struct uprobe *uprobe,
312                                         unsigned long vaddr, bool verify)
313 {
314         if (verify) {
315                 uprobe_opcode_t opcode;
316                 int result = read_opcode(mm, vaddr, &opcode);
317
318                 if (result)
319                         return result;
320
321                 if (opcode != UPROBES_BKPT_INSN)
322                         return -EINVAL;
323         }
324         return write_opcode(mm, uprobe, vaddr,
325                                 *(uprobe_opcode_t *)uprobe->insn);
326 }
327
328 /**
329  * is_bkpt_insn - check if instruction is breakpoint instruction.
330  * @insn: instruction to be checked.
331  * Default implementation of is_bkpt_insn
332  * Returns true if @insn is a breakpoint instruction.
333  */
334 bool __weak is_bkpt_insn(u8 *insn)
335 {
336         return (insn[0] == UPROBES_BKPT_INSN);
337 }
338
339 static int match_uprobe(struct uprobe *l, struct uprobe *r)
340 {
341         if (l->inode < r->inode)
342                 return -1;
343         if (l->inode > r->inode)
344                 return 1;
345         else {
346                 if (l->offset < r->offset)
347                         return -1;
348
349                 if (l->offset > r->offset)
350                         return 1;
351         }
352
353         return 0;
354 }
355
356 static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
357 {
358         struct uprobe u = { .inode = inode, .offset = offset };
359         struct rb_node *n = uprobes_tree.rb_node;
360         struct uprobe *uprobe;
361         int match;
362
363         while (n) {
364                 uprobe = rb_entry(n, struct uprobe, rb_node);
365                 match = match_uprobe(&u, uprobe);
366                 if (!match) {
367                         atomic_inc(&uprobe->ref);
368                         return uprobe;
369                 }
370                 if (match < 0)
371                         n = n->rb_left;
372                 else
373                         n = n->rb_right;
374         }
375         return NULL;
376 }
377
378 /*
379  * Find a uprobe corresponding to a given inode:offset
380  * Acquires uprobes_treelock
381  */
382 static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
383 {
384         struct uprobe *uprobe;
385         unsigned long flags;
386
387         spin_lock_irqsave(&uprobes_treelock, flags);
388         uprobe = __find_uprobe(inode, offset);
389         spin_unlock_irqrestore(&uprobes_treelock, flags);
390         return uprobe;
391 }
392
393 static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
394 {
395         struct rb_node **p = &uprobes_tree.rb_node;
396         struct rb_node *parent = NULL;
397         struct uprobe *u;
398         int match;
399
400         while (*p) {
401                 parent = *p;
402                 u = rb_entry(parent, struct uprobe, rb_node);
403                 match = match_uprobe(uprobe, u);
404                 if (!match) {
405                         atomic_inc(&u->ref);
406                         return u;
407                 }
408
409                 if (match < 0)
410                         p = &parent->rb_left;
411                 else
412                         p = &parent->rb_right;
413
414         }
415         u = NULL;
416         rb_link_node(&uprobe->rb_node, parent, p);
417         rb_insert_color(&uprobe->rb_node, &uprobes_tree);
418         /* get access + creation ref */
419         atomic_set(&uprobe->ref, 2);
420         return u;
421 }
422
423 /*
424  * Acquires uprobes_treelock.
425  * Matching uprobe already exists in rbtree;
426  *      increment (access refcount) and return the matching uprobe.
427  *
428  * No matching uprobe; insert the uprobe in rb_tree;
429  *      get a double refcount (access + creation) and return NULL.
430  */
431 static struct uprobe *insert_uprobe(struct uprobe *uprobe)
432 {
433         unsigned long flags;
434         struct uprobe *u;
435
436         spin_lock_irqsave(&uprobes_treelock, flags);
437         u = __insert_uprobe(uprobe);
438         spin_unlock_irqrestore(&uprobes_treelock, flags);
439
440         /* For now assume that the instruction need not be single-stepped */
441         uprobe->flags |= UPROBES_SKIP_SSTEP;
442         return u;
443 }
444
445 static void put_uprobe(struct uprobe *uprobe)
446 {
447         if (atomic_dec_and_test(&uprobe->ref))
448                 kfree(uprobe);
449 }
450
451 static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
452 {
453         struct uprobe *uprobe, *cur_uprobe;
454
455         uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
456         if (!uprobe)
457                 return NULL;
458
459         uprobe->inode = igrab(inode);
460         uprobe->offset = offset;
461         init_rwsem(&uprobe->consumer_rwsem);
462         INIT_LIST_HEAD(&uprobe->pending_list);
463
464         /* add to uprobes_tree, sorted on inode:offset */
465         cur_uprobe = insert_uprobe(uprobe);
466
467         /* a uprobe exists for this inode:offset combination */
468         if (cur_uprobe) {
469                 kfree(uprobe);
470                 uprobe = cur_uprobe;
471                 iput(inode);
472         } else
473                 atomic_inc(&uprobe_events);
474         return uprobe;
475 }
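/*
 * Refcount lifecycle, as implemented above: the first register for an
 * inode:offset allocates a node and leaves it with a refcount of 2
 * (creation + access); a later register for the same inode:offset frees
 * its freshly allocated node, drops the extra inode reference taken via
 * igrab() and returns the existing uprobe with its refcount bumped by one.
 */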
476
477 static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
478 {
479         struct uprobe_consumer *consumer;
480
481         if (!(uprobe->flags & UPROBES_RUN_HANDLER))
482                 return;
483
484         down_read(&uprobe->consumer_rwsem);
486         for (consumer = uprobe->consumers; consumer;
487                                         consumer = consumer->next) {
488                 if (!consumer->filter ||
489                                 consumer->filter(consumer, current))
490                         consumer->handler(consumer, regs);
491         }
492         up_read(&uprobe->consumer_rwsem);
493 }
494
495 /* Returns the previous consumer */
496 static struct uprobe_consumer *add_consumer(struct uprobe *uprobe,
497                                 struct uprobe_consumer *consumer)
498 {
499         down_write(&uprobe->consumer_rwsem);
500         consumer->next = uprobe->consumers;
501         uprobe->consumers = consumer;
502         up_write(&uprobe->consumer_rwsem);
503         return consumer->next;
504 }
505
506 /*
507  * For uprobe @uprobe, delete the consumer @consumer.
508  * Return true if @consumer was deleted successfully,
509  * otherwise return false.
510  */
511 static bool del_consumer(struct uprobe *uprobe,
512                                 struct uprobe_consumer *consumer)
513 {
514         struct uprobe_consumer **con;
515         bool ret = false;
516
517         down_write(&uprobe->consumer_rwsem);
518         for (con = &uprobe->consumers; *con; con = &(*con)->next) {
519                 if (*con == consumer) {
520                         *con = consumer->next;
521                         ret = true;
522                         break;
523                 }
524         }
525         up_write(&uprobe->consumer_rwsem);
526         return ret;
527 }
528
529 static int __copy_insn(struct address_space *mapping,
530                         struct vm_area_struct *vma, char *insn,
531                         unsigned long nbytes, unsigned long offset)
532 {
533         struct file *filp = vma->vm_file;
534         struct page *page;
535         void *vaddr;
536         unsigned long off1;
537         unsigned long idx;
538
539         if (!filp)
540                 return -EINVAL;
541
542         idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
543         off1 = offset &= ~PAGE_MASK;
544
545         /*
546          * Ensure that the page that has the original instruction is
547          * populated and in page-cache.
548          */
549         page = read_mapping_page(mapping, idx, filp);
550         if (IS_ERR(page))
551                 return -ENOMEM;
552
553         vaddr = kmap_atomic(page);
554         memcpy(insn, vaddr + off1, nbytes);
555         kunmap_atomic(vaddr);
556         page_cache_release(page);
557         return 0;
558 }
559
560 static int copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma,
561                                         unsigned long addr)
562 {
563         struct address_space *mapping;
564         int bytes;
565         unsigned long nbytes;
566
567         addr &= ~PAGE_MASK;
568         nbytes = PAGE_SIZE - addr;
569         mapping = uprobe->inode->i_mapping;
570
571         /* Instruction at end of binary; copy only available bytes */
572         if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
573                 bytes = uprobe->inode->i_size - uprobe->offset;
574         else
575                 bytes = MAX_UINSN_BYTES;
576
577         /* Instruction at the page-boundary; copy bytes in second page */
578         if (nbytes < bytes) {
579                 if (__copy_insn(mapping, vma, uprobe->insn + nbytes,
580                                 bytes - nbytes, uprobe->offset + nbytes))
581                         return -ENOMEM;
582
583                 bytes = nbytes;
584         }
585         return __copy_insn(mapping, vma, uprobe->insn, bytes, uprobe->offset);
586 }
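/*
 * Worked example for the page-boundary case above (assuming MAX_UINSN_BYTES
 * is 16, as on x86): if the probed instruction starts 3 bytes before the end
 * of a page, nbytes is 3 and bytes is 16, so the first __copy_insn() call
 * fills insn[3..15] from the following page and the second call fills
 * insn[0..2] from the page containing the probe.
 */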
587
588 static int install_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
589                                 struct vm_area_struct *vma, loff_t vaddr)
590 {
591         unsigned long addr;
592         int ret;
593
594         /*
595          * The probe is about to be deleted; we don't know whether
596          * somebody already inserted the breakpoint, so behave as
597          * if the probe already exists.
598          */
599         if (!uprobe->consumers)
600                 return -EEXIST;
601
602         addr = (unsigned long)vaddr;
603         if (!(uprobe->flags & UPROBES_COPY_INSN)) {
604                 ret = copy_insn(uprobe, vma, addr);
605                 if (ret)
606                         return ret;
607
608                 if (is_bkpt_insn(uprobe->insn))
609                         return -EEXIST;
610
611                 ret = analyze_insn(mm, uprobe);
612                 if (ret)
613                         return ret;
614
615                 uprobe->flags |= UPROBES_COPY_INSN;
616         }
617         ret = set_bkpt(mm, uprobe, addr);
618         if (!ret)
619                 atomic_inc(&mm->mm_uprobes_count);
620
621         return ret;
622 }
623
624 static void remove_breakpoint(struct mm_struct *mm, struct uprobe *uprobe,
625                                                         loff_t vaddr)
626 {
627         if (!set_orig_insn(mm, uprobe, (unsigned long)vaddr, true))
628                 atomic_dec(&mm->mm_uprobes_count);
629 }
630
631 /*
632  * There could be threads that have hit the breakpoint and are entering the
633  * notifier code and trying to acquire the uprobes_treelock. The thread
634  * calling delete_uprobe() that is removing the uprobe from the rb_tree can
635  * race with these threads and might acquire the uprobes_treelock before
636  * some of the breakpoint-hit threads do. In such a case, the breakpoint-hit
637  * threads will not find the uprobe. Hence wait till the current breakpoint-
638  * hit threads acquire the uprobes_treelock before the uprobe is removed
639  * from the rbtree.
640  */
641 static void delete_uprobe(struct uprobe *uprobe)
642 {
643         unsigned long flags;
644
645         bulkref_wait_old(&uprobes_srcu);
646         spin_lock_irqsave(&uprobes_treelock, flags);
647         rb_erase(&uprobe->rb_node, &uprobes_tree);
648         spin_unlock_irqrestore(&uprobes_treelock, flags);
649         iput(uprobe->inode);
650         put_uprobe(uprobe);
651         atomic_dec(&uprobe_events);
652 }
653
654 static struct vma_info *__find_next_vma_info(struct list_head *head,
655                         loff_t offset, struct address_space *mapping,
656                         struct vma_info *vi, bool is_register)
657 {
658         struct prio_tree_iter iter;
659         struct vm_area_struct *vma;
660         struct vma_info *tmpvi;
661         loff_t vaddr;
662         unsigned long pgoff = offset >> PAGE_SHIFT;
663         int existing_vma;
664
665         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
666                 if (!valid_vma(vma, is_register))
667                         continue;
668
669                 existing_vma = 0;
670                 vaddr = vma->vm_start + offset;
671                 vaddr -= vma->vm_pgoff << PAGE_SHIFT;
672                 list_for_each_entry(tmpvi, head, probe_list) {
673                         if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
674                                 existing_vma = 1;
675                                 break;
676                         }
677                 }
678
679                 /*
680                  * Another vma needs a probe to be installed. However skip
681                  * installing the probe if the vma is about to be unlinked.
682                  */
683                 if (!existing_vma &&
684                                 atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
685                         vi->mm = vma->vm_mm;
686                         vi->vaddr = vaddr;
687                         list_add(&vi->probe_list, head);
688                         return vi;
689                 }
690         }
691         return NULL;
692 }
693
694 /*
695  * Iterate over the rmap prio tree and find a vma where a probe has not
696  * yet been inserted.
697  */
698 static struct vma_info *find_next_vma_info(struct list_head *head,
699                         loff_t offset, struct address_space *mapping,
700                         bool is_register)
701 {
702         struct vma_info *vi, *retvi;
703         vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
704         if (!vi)
705                 return ERR_PTR(-ENOMEM);
706
707         mutex_lock(&mapping->i_mmap_mutex);
708         retvi = __find_next_vma_info(head, offset, mapping, vi, is_register);
709         mutex_unlock(&mapping->i_mmap_mutex);
710
711         if (!retvi)
712                 kfree(vi);
713         return retvi;
714 }
715
716 static int __register_uprobe(struct inode *inode, loff_t offset,
717                                 struct uprobe *uprobe)
718 {
719         struct list_head try_list;
720         struct vm_area_struct *vma;
721         struct address_space *mapping;
722         struct vma_info *vi, *tmpvi;
723         struct mm_struct *mm;
724         loff_t vaddr;
725         int ret = 0;
726
727         mapping = inode->i_mapping;
728         INIT_LIST_HEAD(&try_list);
729         while ((vi = find_next_vma_info(&try_list, offset,
730                                                 mapping, true)) != NULL) {
731                 if (IS_ERR(vi)) {
732                         ret = -ENOMEM;
733                         break;
734                 }
735                 mm = vi->mm;
736                 down_read(&mm->mmap_sem);
737                 vma = find_vma(mm, (unsigned long)vi->vaddr);
738                 if (!vma || !valid_vma(vma, true)) {
739                         list_del(&vi->probe_list);
740                         kfree(vi);
741                         up_read(&mm->mmap_sem);
742                         mmput(mm);
743                         continue;
744                 }
745                 vaddr = vma->vm_start + offset;
746                 vaddr -= vma->vm_pgoff << PAGE_SHIFT;
747                 if (vma->vm_file->f_mapping->host != inode ||
748                                                 vaddr != vi->vaddr) {
749                         list_del(&vi->probe_list);
750                         kfree(vi);
751                         up_read(&mm->mmap_sem);
752                         mmput(mm);
753                         continue;
754                 }
755                 ret = install_breakpoint(mm, uprobe, vma, vi->vaddr);
756                 up_read(&mm->mmap_sem);
757                 mmput(mm);
758                 if (ret && ret == -EEXIST)
759                         ret = 0;
760                 if (!ret)
761                         break;
762         }
763         list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
764                 list_del(&vi->probe_list);
765                 kfree(vi);
766         }
767         return ret;
768 }
769
770 static void __unregister_uprobe(struct inode *inode, loff_t offset,
771                                                 struct uprobe *uprobe)
772 {
773         struct list_head try_list;
774         struct address_space *mapping;
775         struct vma_info *vi, *tmpvi;
776         struct vm_area_struct *vma;
777         struct mm_struct *mm;
778         loff_t vaddr;
779
780         mapping = inode->i_mapping;
781         INIT_LIST_HEAD(&try_list);
782         while ((vi = find_next_vma_info(&try_list, offset,
783                                                 mapping, false)) != NULL) {
784                 if (IS_ERR(vi))
785                         break;
786                 mm = vi->mm;
787                 down_read(&mm->mmap_sem);
788                 vma = find_vma(mm, (unsigned long)vi->vaddr);
789                 if (!vma || !valid_vma(vma, false)) {
790                         list_del(&vi->probe_list);
791                         kfree(vi);
792                         up_read(&mm->mmap_sem);
793                         mmput(mm);
794                         continue;
795                 }
796                 vaddr = vma->vm_start + offset;
797                 vaddr -= vma->vm_pgoff << PAGE_SHIFT;
798                 if (vma->vm_file->f_mapping->host != inode ||
799                                                 vaddr != vi->vaddr) {
800                         list_del(&vi->probe_list);
801                         kfree(vi);
802                         up_read(&mm->mmap_sem);
803                         mmput(mm);
804                         continue;
805                 }
806                 remove_breakpoint(mm, uprobe, vi->vaddr);
807                 up_read(&mm->mmap_sem);
808                 mmput(mm);
809         }
810
811         list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
812                 list_del(&vi->probe_list);
813                 kfree(vi);
814         }
815         delete_uprobe(uprobe);
816 }
817
818 /*
819  * register_uprobe - register a probe
820  * @inode: the file in which the probe has to be placed.
821  * @offset: offset from the start of the file.
822  * @consumer: information on how to handle the probe.
823  *
824  * Apart from the access refcount, register_uprobe() takes a creation
825  * refcount (through alloc_uprobe) if and only if this @uprobe is getting
826  * inserted into the rbtree (i.e. the first consumer for an @inode:@offset
827  * tuple).  The creation refcount stops unregister_uprobe from freeing the
828  * @uprobe even before the register operation is complete. The creation
829  * refcount is released when the last @consumer for the @uprobe
830  * unregisters.
831  *
832  * Return errno if it cannot successfully install probes,
833  * else return 0 (success).
834  */
835 int register_uprobe(struct inode *inode, loff_t offset,
836                                 struct uprobe_consumer *consumer)
837 {
838         struct uprobe *uprobe;
839         int ret = -EINVAL;
840
841         if (!consumer || consumer->next)
842                 return ret;
843
844         inode = igrab(inode);
845         if (!inode)
846                 return ret;
847
848         if (offset > i_size_read(inode))
849                 goto reg_out;
850
851         ret = 0;
852         mutex_lock(uprobes_hash(inode));
853         uprobe = alloc_uprobe(inode, offset);
854         if (uprobe && !add_consumer(uprobe, consumer)) {
855                 ret = __register_uprobe(inode, offset, uprobe);
856                 if (ret) {
857                         uprobe->consumers = NULL;
858                         __unregister_uprobe(inode, offset, uprobe);
859                 } else
860                         uprobe->flags |= UPROBES_RUN_HANDLER;
861         }
862
863         mutex_unlock(uprobes_hash(inode));
864         if (uprobe)
                    put_uprobe(uprobe);
            else
                    ret = -ENOMEM;
865
866 reg_out:
867         iput(inode);
868         return ret;
869 }
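/*
 * Usage sketch (illustrative only; the uprobe_consumer layout lives in
 * linux/uprobes.h and the handler/filter signatures below are assumed to
 * match what handler_chain() invokes):
 *
 *	static int my_handler(struct uprobe_consumer *self,
 *			      struct pt_regs *regs)
 *	{
 *		return 0;	// inspect regs, count hits, etc.
 *	}
 *
 *	static struct uprobe_consumer my_consumer = {
 *		.handler = my_handler,
 *		.filter	 = NULL,	// NULL filter: handle every task
 *	};
 *
 *	err = register_uprobe(inode, offset, &my_consumer);
 *	...
 *	unregister_uprobe(inode, offset, &my_consumer);
 */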
870
871 /*
872  * unregister_uprobe - unregister an already registered probe.
873  * @inode: the file from which the probe has to be removed.
874  * @offset: offset from the start of the file.
875  * @consumer: identifies which probe if multiple probes are co-located.
876  */
877 void unregister_uprobe(struct inode *inode, loff_t offset,
878                                 struct uprobe_consumer *consumer)
879 {
880         struct uprobe *uprobe = NULL;
881
882         inode = igrab(inode);
883         if (!inode || !consumer)
884                 goto unreg_out;
885
886         uprobe = find_uprobe(inode, offset);
887         if (!uprobe)
888                 goto unreg_out;
889
890         mutex_lock(uprobes_hash(inode));
891         if (!del_consumer(uprobe, consumer)) {
892                 mutex_unlock(uprobes_hash(inode));
893                 goto unreg_out;
894         }
895
896         if (!uprobe->consumers) {
897                 __unregister_uprobe(inode, offset, uprobe);
898                 uprobe->flags &= ~UPROBES_RUN_HANDLER;
899         }
900         mutex_unlock(uprobes_hash(inode));
901
902 unreg_out:
903         if (uprobe)
904                 put_uprobe(uprobe);
905         if (inode)
906                 iput(inode);
907 }
908
909 /*
910  * Of all the nodes that correspond to the given inode, return the node
911  * with the least offset.
912  */
913 static struct rb_node *find_least_offset_node(struct inode *inode)
914 {
915         struct uprobe u = { .inode = inode, .offset = 0};
916         struct rb_node *n = uprobes_tree.rb_node;
917         struct rb_node *close_node = NULL;
918         struct uprobe *uprobe;
919         int match;
920
921         while (n) {
922                 uprobe = rb_entry(n, struct uprobe, rb_node);
923                 match = match_uprobe(&u, uprobe);
924                 if (uprobe->inode == inode)
925                         close_node = n;
926
927                 if (!match)
928                         return close_node;
929
930                 if (match < 0)
931                         n = n->rb_left;
932                 else
933                         n = n->rb_right;
934         }
935         return close_node;
936 }
937
938 /*
939  * For a given inode, build a list of probes that need to be inserted.
940  */
941 static void build_probe_list(struct inode *inode, struct list_head *head)
942 {
943         struct uprobe *uprobe;
944         struct rb_node *n;
945         unsigned long flags;
946
947         spin_lock_irqsave(&uprobes_treelock, flags);
948         n = find_least_offset_node(inode);
949         for (; n; n = rb_next(n)) {
950                 uprobe = rb_entry(n, struct uprobe, rb_node);
951                 if (uprobe->inode != inode)
952                         break;
953
954                 list_add(&uprobe->pending_list, head);
955                 atomic_inc(&uprobe->ref);
956         }
957         spin_unlock_irqrestore(&uprobes_treelock, flags);
958 }
959
960 /*
961  * Called from mmap_region.
962  * Called with mm->mmap_sem acquired.
963  *
964  * Return a negative errno if we fail to insert probes and we cannot
965  * bail out.
966  * Return 0 otherwise, i.e.:
967  *      - successful insertion of probes
968  *      - (or) no possible probes to be inserted.
969  *      - (or) insertion of probes failed but we can bail out.
970  */
971 int mmap_uprobe(struct vm_area_struct *vma)
972 {
973         struct list_head tmp_list;
974         struct uprobe *uprobe, *u;
975         struct inode *inode;
976         int ret = 0, count = 0;
977
978         if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
979                 return ret;     /* Bail-out */
980
981         inode = igrab(vma->vm_file->f_mapping->host);
982         if (!inode)
983                 return ret;
984
985         INIT_LIST_HEAD(&tmp_list);
986         mutex_lock(uprobes_mmap_hash(inode));
987         build_probe_list(inode, &tmp_list);
988         list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
989                 loff_t vaddr;
990
991                 list_del(&uprobe->pending_list);
992                 if (!ret) {
993                         vaddr = vma->vm_start + uprobe->offset;
994                         vaddr -= vma->vm_pgoff << PAGE_SHIFT;
995                         if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
996                                 put_uprobe(uprobe);
997                                 continue;
998                         }
999                         ret = install_breakpoint(vma->vm_mm, uprobe, vma,
1000                                                                 vaddr);
1001                         if (ret == -EEXIST) {
1002                                 atomic_inc(&vma->vm_mm->mm_uprobes_count);
1003                                 ret = 0;
1004                         }
1005                         if (!ret)
1006                                 count++;
1007                 }
1008                 put_uprobe(uprobe);
1009         }
1010
1011         mutex_unlock(uprobes_mmap_hash(inode));
1012         iput(inode);
1013         if (ret)
1014                 atomic_sub(count, &vma->vm_mm->mm_uprobes_count);
1015
1016         return ret;
1017 }
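/*
 * Worked example of the file-offset to virtual-address translation used
 * above (assuming PAGE_SHIFT == 12): a vma with vm_start == 0x400000 and
 * vm_pgoff == 0x10 maps file offset 0x10000 at 0x400000, so a probe at
 * file offset 0x10123 lands at vaddr 0x400000 + 0x10123 - 0x10000 ==
 * 0x400123, which is then range-checked against [vm_start, vm_end).
 */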
1018
1019 /*
1020  * Called in context of a munmap of a vma.
1021  */
1022 void munmap_uprobe(struct vm_area_struct *vma)
1023 {
1024         struct list_head tmp_list;
1025         struct uprobe *uprobe, *u;
1026         struct inode *inode;
1027
1028         if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
1029                 return;         /* Bail-out */
1030
1031         if (!atomic_read(&vma->vm_mm->mm_uprobes_count))
1032                 return;
1033
1034         inode = igrab(vma->vm_file->f_mapping->host);
1035         if (!inode)
1036                 return;
1037
1038         INIT_LIST_HEAD(&tmp_list);
1039         mutex_lock(uprobes_mmap_hash(inode));
1040         build_probe_list(inode, &tmp_list);
1041         list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
1042                 loff_t vaddr;
1043                 uprobe_opcode_t opcode;
1044
1045                 list_del(&uprobe->pending_list);
1046                 vaddr = vma->vm_start + uprobe->offset;
1047                 vaddr -= vma->vm_pgoff << PAGE_SHIFT;
1048                 if (vaddr >= vma->vm_start && vaddr < vma->vm_end) {
1049
1050                         /*
1051                          * An unregister could have removed the probe before
1052                          * unmap. So check before we decrement the count.
1053                          */
1054                         if (!read_opcode(vma->vm_mm, vaddr, &opcode) &&
1055                                                 (opcode == UPROBES_BKPT_INSN))
1056                                 atomic_dec(&vma->vm_mm->mm_uprobes_count);
1057                 }
1058                 put_uprobe(uprobe);
1059         }
1060         mutex_unlock(uprobes_mmap_hash(inode));
1061         iput(inode);
1062         return;
1063 }
1064
1065 /* Slot allocation for XOL */
1066 static int xol_add_vma(struct uprobes_xol_area *area)
1067 {
1068         struct mm_struct *mm;
1069         int ret;
1070
1071         area->page = alloc_page(GFP_HIGHUSER);
1072         if (!area->page)
1073                 return -ENOMEM;
1074
1075         mm = current->mm;
1076         down_write(&mm->mmap_sem);
1077         ret = -EALREADY;
1078         if (mm->uprobes_xol_area)
1079                 goto fail;
1080
1081         ret = -ENOMEM;
1082
1083         /* Try to map as high as possible, this is only a hint. */
1084         area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
1085                                                         PAGE_SIZE, 0, 0);
1086         if (area->vaddr & ~PAGE_MASK) {
1087                 ret = area->vaddr;
1088                 goto fail;
1089         }
1090
1091         ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
1092                                 VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO,
1093                                 &area->page);
1094         if (ret)
1095                 goto fail;
1096
1097         smp_wmb();      /* pairs with get_uprobes_xol_area() */
1098         mm->uprobes_xol_area = area;
1099         ret = 0;
1100
1101 fail:
1102         up_write(&mm->mmap_sem);
1103         if (ret)
1104                 __free_page(area->page);
1105
1106         return ret;
1107 }
1108
1109 static struct uprobes_xol_area *get_uprobes_xol_area(struct mm_struct *mm)
1110 {
1111         struct uprobes_xol_area *area = mm->uprobes_xol_area;
1112         smp_read_barrier_depends();/* pairs with wmb in xol_add_vma() */
1113         return area;
1114 }
1115
1116 /*
1117  * xol_alloc_area - Allocate process's uprobes_xol_area.
1118  * This area will be used for storing instructions for execution out of
1119  * line.
1120  *
1121  * Returns the allocated area or NULL.
1122  */
1123 static struct uprobes_xol_area *xol_alloc_area(void)
1124 {
1125         struct uprobes_xol_area *area;
1126
1127         area = kzalloc(sizeof(*area), GFP_KERNEL);
1128         if (unlikely(!area))
1129                 return NULL;
1130
1131         area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long),
1132                                                                 GFP_KERNEL);
1133
1134         if (!area->bitmap)
1135                 goto fail;
1136
1137         init_waitqueue_head(&area->wq);
1138         if (!xol_add_vma(area))
1139                 return area;
1140
1141 fail:
1142         kfree(area->bitmap);
1143         kfree(area);
1144         return get_uprobes_xol_area(current->mm);
1145 }
1146
1147 /*
1148  * free_uprobes_xol_area - Free the area allocated for slots.
1149  */
1150 void free_uprobes_xol_area(struct mm_struct *mm)
1151 {
1152         struct uprobes_xol_area *area = mm->uprobes_xol_area;
1153
1154         if (!area)
1155                 return;
1156
1157         put_page(area->page);
1158         kfree(area->bitmap);
1159         kfree(area);
1160 }
1161
1162 /*
1163  * xol_take_insn_slot - search for a free instruction slot.
1164  */
1165 static unsigned long xol_take_insn_slot(struct uprobes_xol_area *area)
1166 {
1167         unsigned long slot_addr;
1168         int slot_nr;
1169
1170         do {
1171                 slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
1172                 if (slot_nr < UINSNS_PER_PAGE) {
1173                         if (!test_and_set_bit(slot_nr, area->bitmap))
1174                                 break;
1175
1176                         slot_nr = UINSNS_PER_PAGE;
1177                         continue;
1178                 }
1179                 wait_event(area->wq,
1180                         (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
1181         } while (slot_nr >= UINSNS_PER_PAGE);
1182
1183         slot_addr = area->vaddr + (slot_nr * UPROBES_XOL_SLOT_BYTES);
1184         atomic_inc(&area->slot_count);
1185         return slot_addr;
1186 }
1187
1188 /*
1189  * xol_get_insn_slot - allocate an instruction slot (and the XOL area
1190  * if needed) and copy the probed instruction into it.
1191  * Returns the allocated slot address or 0.
1192  */
1193 static unsigned long xol_get_insn_slot(struct uprobe *uprobe,
1194                                         unsigned long slot_addr)
1195 {
1196         struct uprobes_xol_area *area;
1197         unsigned long offset;
1198         void *vaddr;
1199
1200         area = get_uprobes_xol_area(current->mm);
1201         if (!area) {
1202                 area = xol_alloc_area();
1203                 if (!area)
1204                         return 0;
1205         }
1206         current->utask->xol_vaddr = xol_take_insn_slot(area);
1207
1208         /*
1209          * Initialize the slot if xol_vaddr points to valid
1210          * instruction slot.
1211          */
1212         if (unlikely(!current->utask->xol_vaddr))
1213                 return 0;
1214
1215         current->utask->vaddr = slot_addr;
1216         offset = current->utask->xol_vaddr & ~PAGE_MASK;
1217         vaddr = kmap_atomic(area->page);
1218         memcpy(vaddr + offset, uprobe->insn, MAX_UINSN_BYTES);
1219         kunmap_atomic(vaddr);
1220         return current->utask->xol_vaddr;
1221 }
1222
1223 /*
1224  * xol_free_insn_slot - If slot was earlier allocated by
1225  * @xol_get_insn_slot(), make the slot available for
1226  * subsequent requests.
1227  */
1228 static void xol_free_insn_slot(struct task_struct *tsk)
1229 {
1230         struct uprobes_xol_area *area;
1231         unsigned long vma_end;
1232         unsigned long slot_addr;
1233
1234         if (!tsk->mm || !tsk->mm->uprobes_xol_area || !tsk->utask)
1235                 return;
1236
1237         slot_addr = tsk->utask->xol_vaddr;
1238
1239         if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
1240                 return;
1241
1242         area = tsk->mm->uprobes_xol_area;
1243         vma_end = area->vaddr + PAGE_SIZE;
1244         if (area->vaddr <= slot_addr && slot_addr < vma_end) {
1245                 int slot_nr;
1246                 unsigned long offset = slot_addr - area->vaddr;
1247
1248                 slot_nr = offset / UPROBES_XOL_SLOT_BYTES;
1249                 if (slot_nr >= UINSNS_PER_PAGE)
1250                         return;
1251
1252                 clear_bit(slot_nr, area->bitmap);
1253                 atomic_dec(&area->slot_count);
1254                 if (waitqueue_active(&area->wq))
1255                         wake_up(&area->wq);
1256                 tsk->utask->xol_vaddr = 0;
1257         }
1258 }
1259
1260 /**
1261  * get_uprobe_bkpt_addr - compute address of bkpt given post-bkpt regs
1262  * @regs: Reflects the saved state of the task after it has hit a breakpoint
1263  * instruction.
1264  * Return the address of the breakpoint instruction.
1265  */
1266 unsigned long __weak get_uprobe_bkpt_addr(struct pt_regs *regs)
1267 {
1268         return instruction_pointer(regs) - UPROBES_BKPT_INSN_SIZE;
1269 }
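/*
 * For example, on x86 UPROBES_BKPT_INSN_SIZE is 1 (the int3 opcode), so
 * after the trap the saved instruction pointer is one byte past the
 * breakpoint and the probed address is simply regs->ip - 1.
 */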
1270
1271 /*
1272  * Called with no locks held.
1273  * Called in the context of an exiting or an exec-ing thread.
1274  */
1275 void free_uprobe_utask(struct task_struct *tsk)
1276 {
1277         struct uprobe_task *utask = tsk->utask;
1278
1279         if (tsk->uprobes_bulkref_id != -1)
1280                 bulkref_put(&uprobes_srcu, tsk->uprobes_bulkref_id);
1281
1282         if (!utask)
1283                 return;
1284
1285         if (utask->active_uprobe)
1286                 put_uprobe(utask->active_uprobe);
1287
1288         xol_free_insn_slot(tsk);
1289         kfree(utask);
1290         tsk->utask = NULL;
1291 }
1292
1293 /*
1294  * Allocate a uprobe_task object for the task.
1295  * Called when the thread hits a breakpoint for the first time.
1296  *
1297  * Returns:
1298  * - pointer to new uprobe_task on success
1299  * - NULL otherwise
1300  */
1301 static struct uprobe_task *add_utask(void)
1302 {
1303         struct uprobe_task *utask;
1304
1305         utask = kzalloc(sizeof *utask, GFP_KERNEL);
1306         if (unlikely(utask == NULL))
1307                 return NULL;
1308
1309         utask->active_uprobe = NULL;
1310         current->utask = utask;
1311         return utask;
1312 }
1313
1314 /* Prepare to single-step probed instruction out of line. */
1315 static int pre_ssout(struct uprobe *uprobe, struct pt_regs *regs,
1316                                 unsigned long vaddr)
1317 {
1318         if (xol_get_insn_slot(uprobe, vaddr) && !pre_xol(uprobe, regs))
1319                 return 0;
1320         return -EFAULT;
1321 }
1322
1323 bool uprobe_deny_signal(void)
1324 {
1325         struct task_struct *tsk = current;
1326         struct uprobe_task *utask = tsk->utask;
1327
1328         if (likely(!utask || !utask->active_uprobe))
1329                 return false;
1330
1331         WARN_ON_ONCE(utask->state != UTASK_SSTEP);
1332
1333         if (signal_pending(tsk)) {
1334                 spin_lock_irq(&tsk->sighand->siglock);
1335                 clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
1336                 spin_unlock_irq(&tsk->sighand->siglock);
1337
1338                 if (__fatal_signal_pending(tsk) || xol_was_trapped(tsk)) {
1339                         utask->state = UTASK_SSTEP_TRAPPED;
1340                         set_tsk_thread_flag(tsk, TIF_UPROBE);
1341                         set_tsk_thread_flag(tsk, TIF_NOTIFY_RESUME);
1342                 }
1343         }
1344
1345         return true;
1346 }
1347
1348 bool __weak can_skip_xol(struct pt_regs *regs, struct uprobe *u)
1349 {
1350         u->flags &= ~UPROBES_SKIP_SSTEP;
1351         return false;
1352 }
1353
1354 /*
1355  * uprobe_notify_resume gets called in task context just before returning
1356  * to userspace.
1357  *
1358  *  If it's the first time the probepoint is hit, a slot gets allocated here.
1359  *  If it's the first time the thread hits a breakpoint, a utask gets
1360  *  allocated here.
1361  */
1362 void uprobe_notify_resume(struct pt_regs *regs)
1363 {
1364         struct vm_area_struct *vma;
1365         struct uprobe_task *utask;
1366         struct mm_struct *mm;
1367         struct uprobe *u = NULL;
1368         unsigned long probept;
1369
1370         utask = current->utask;
1371         mm = current->mm;
1372         if (!utask || utask->state == UTASK_BP_HIT) {
1373                 probept = get_uprobe_bkpt_addr(regs);
1374                 down_read(&mm->mmap_sem);
1375                 vma = find_vma(mm, probept);
1376                 if (vma && valid_vma(vma, false))
1377                         u = find_uprobe(vma->vm_file->f_mapping->host,
1378                                         probept - vma->vm_start +
1379                                         (vma->vm_pgoff << PAGE_SHIFT));
1380
1381                 bulkref_put(&uprobes_srcu, current->uprobes_bulkref_id);
1382                 current->uprobes_bulkref_id = -1;
1383                 up_read(&mm->mmap_sem);
1384                 if (!u)
1385                         /* No matching uprobe; signal SIGTRAP. */
1386                         goto cleanup_ret;
1387                 if (!utask) {
1388                         utask = add_utask();
1389                         /* Cannot Allocate; re-execute the instruction. */
1390                         if (!utask)
1391                                 goto cleanup_ret;
1392                 }
1393                 utask->active_uprobe = u;
1394                 handler_chain(u, regs);
1395
1396                 if (u->flags & UPROBES_SKIP_SSTEP && can_skip_xol(regs, u))
1397                         goto cleanup_ret;
1398
1399                 utask->state = UTASK_SSTEP;
1400                 if (!pre_ssout(u, regs, probept))
1401                         user_enable_single_step(current);
1402                 else
1403                         /* Cannot Singlestep; re-execute the instruction. */
1404                         goto cleanup_ret;
1405         } else {
1406                 u = utask->active_uprobe;
1407                 if (utask->state == UTASK_SSTEP_ACK)
1408                         post_xol(u, regs);
1409                 else if (utask->state == UTASK_SSTEP_TRAPPED)
1410                         abort_xol(regs, u);
1411                 else
1412                         WARN_ON_ONCE(1);
1413
1414                 put_uprobe(u);
1415                 utask->active_uprobe = NULL;
1416                 utask->state = UTASK_RUNNING;
1417                 user_disable_single_step(current);
1418                 xol_free_insn_slot(current);
1419
1420                 spin_lock_irq(&current->sighand->siglock);
1421                 recalc_sigpending(); /* see uprobe_deny_signal() */
1422                 spin_unlock_irq(&current->sighand->siglock);
1423         }
1424         return;
1425
1426 cleanup_ret:
1427         if (utask) {
1428                 utask->active_uprobe = NULL;
1429                 utask->state = UTASK_RUNNING;
1430         }
1431         if (u) {
1432                 if (!(u->flags & UPROBES_SKIP_SSTEP))
1433                         set_instruction_pointer(regs, probept);
1434
1435                 put_uprobe(u);
1436         } else
1437                 send_sig(SIGTRAP, current, 0);
1438 }
1439
1440 /*
1441  * uprobe_bkpt_notifier gets called from interrupt context.
1442  * It takes a bulkref reference and sets the TIF_UPROBE flag.
1443  */
1444 int uprobe_bkpt_notifier(struct pt_regs *regs)
1445 {
1446         struct uprobe_task *utask;
1447
1448         if (!current->mm || !atomic_read(&current->mm->mm_uprobes_count))
1449                 /* task is currently not uprobed */
1450                 return 0;
1451
1452         utask = current->utask;
1453         if (utask)
1454                 utask->state = UTASK_BP_HIT;
1455
1456         set_thread_flag(TIF_UPROBE);
1457         current->uprobes_bulkref_id = bulkref_get(&uprobes_srcu);
1458         return 1;
1459 }
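/*
 * Overall breakpoint-hit flow, as implemented above and in
 * uprobe_notify_resume(): the architecture breakpoint trap (e.g. int3 on
 * x86) is routed through the die notifier registered in init_uprobes() to
 * uprobe_bkpt_notifier(), which takes a bulkref reference and sets
 * TIF_UPROBE; on the way back to userspace uprobe_notify_resume() looks up
 * the uprobe, runs the consumer handlers and, unless the instruction can
 * be skipped, single-steps a copy of it from the XOL area before letting
 * the task resume.
 */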
1460
1461 /*
1462  * uprobe_post_notifier gets called in interrupt context.
1463  * It completes the single step operation.
1464  */
1465 int uprobe_post_notifier(struct pt_regs *regs)
1466 {
1467         struct uprobe_task *utask = current->utask;
1468
1469         if (!current->mm || !utask || !utask->active_uprobe)
1470                 /* task is currently not uprobed */
1471                 return 0;
1472
1473         utask->state = UTASK_SSTEP_ACK;
1474         set_thread_flag(TIF_UPROBE);
1475         return 1;
1476 }
1477
1478 struct notifier_block uprobe_exception_nb = {
1479         .notifier_call = uprobe_exception_notify,
1480         .priority = INT_MAX - 1,        /* notified after kprobes, kgdb */
1481 };
1482
1483 static int __init init_uprobes(void)
1484 {
1485         int i;
1486
1487         for (i = 0; i < UPROBES_HASH_SZ; i++) {
1488                 mutex_init(&uprobes_mutex[i]);
1489                 mutex_init(&uprobes_mmap_mutex[i]);
1490         }
1491         init_bulkref(&uprobes_srcu);
1492         return register_die_notifier(&uprobe_exception_nb);
1493 }
1494
1495 static void __exit exit_uprobes(void)
1496 {
1497 }
1498
1499 module_init(init_uprobes);
1500 module_exit(exit_uprobes);