#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time
 *    is allowed)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned int		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
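
/*
 * A minimal sketch of the intended user-space flow, mirroring the state
 * transitions above (this assumes debugfs is mounted at /sys/kernel/debug;
 * COVER_SIZE is an arbitrary example value, counted in longs):
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, 0);
 *	cover[0] = 0;
 *	... run the syscall under test, then read cover[0] PCs starting
 *	    at cover[1] ...
 *	ioctl(fd, KCOV_DISABLE, 0);
 */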

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside a local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;
		unsigned long ip = _RET_IP_;

#ifdef CONFIG_RANDOMIZE_BASE
		ip -= kaslr_offset();
#endif

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; there are paired barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = ip;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
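
/*
 * Resulting arena layout, for reference (implied by the handler above):
 *
 *	area[0]    - number of PCs recorded so far
 *	area[1..N] - the PCs themselves, in execution order
 *
 * Once area[0] + 1 reaches t->kcov_size the buffer is full and further
 * PCs are silently dropped.
 */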

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}
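
/*
 * Note on the mapping scheme below: the arena is allocated with
 * vmalloc_user() and the same physical pages are then inserted into the
 * user VMA with vm_insert_page(), so kernel-side writes in
 * __sanitizer_cov_trace_pc() are directly visible to the user-space
 * reader without copying.
 */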
static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * bytes, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily via KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}
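
/*
 * Reference-count lifecycle, summarizing the above: open() sets the count
 * to 1 for the file descriptor, KCOV_ENABLE takes one more reference for
 * the enabled task, and that reference is dropped either by KCOV_DISABLE
 * or by kcov_task_exit(), whichever comes first.
 */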
static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);