/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu-internal.h>
static const char *mpx_mapping_name(struct vm_area_struct *vma)
{
	return "[mpx]";
}

static struct vm_operations_struct mpx_vma_ops = {
	.name = mpx_mapping_name,
};

static int is_mpx_vma(struct vm_area_struct *vma)
{
	return (vma->vm_ops == &mpx_vma_ops);
}
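/*
 * Note (illustrative): the ->name hook above is what makes these
 * VMAs show up with an "[mpx]" tag in /proc/<pid>/maps, e.g.:
 *
 *	7ffd9c200000-7ffd9c600000 rw-p 00000000 00:00 0    [mpx]
 *
 * The address and permissions in this line are made up for the
 * example.
 */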
/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 *
 * Later on, we use the vma->vm_ops to uniquely identify these
 * special VMAs.
 */
static unsigned long mpx_mmap(unsigned long len)
{
	unsigned long ret;
	unsigned long addr, pgoff;
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	struct vm_area_struct *vma;
	/* Only bounds table and bounds directory can be allocated here */
	if (len != MPX_BD_SIZE_BYTES && len != MPX_BT_SIZE_BYTES)
		return -EINVAL;

	down_write(&mm->mmap_sem);

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count) {
		ret = -ENOMEM;
		goto out;
	}

	/* Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(NULL, 0, len, 0, MAP_ANONYMOUS | MAP_PRIVATE);
	if (addr & ~PAGE_MASK) {
		ret = addr;
		goto out;
	}

	vm_flags = VM_READ | VM_WRITE | VM_MPX |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Set pgoff according to addr for anon_vma */
	pgoff = addr >> PAGE_SHIFT;

	ret = mmap_region(NULL, addr, len, vm_flags, pgoff);
	if (IS_ERR_VALUE(ret))
		goto out;

	vma = find_vma(mm, ret);
	if (!vma) {
		ret = -ENOMEM;
		goto out;
	}
	vma->vm_ops = &mpx_vma_ops;

	if (vm_flags & VM_LOCKED) {
		up_write(&mm->mmap_sem);
		mm_populate(ret, len);
		return ret;
	}
out:
	up_write(&mm->mmap_sem);
	return ret;
}
enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;

	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't possibly decode a 32-bit instruction as
	 * reading a 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;

	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	default:
		pr_err("invalid register type");
		break;
	}

	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}
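/*
 * Worked example (illustrative): for an instruction with ModRM.rm ==
 * 0b001 and the REX.B prefix bit set, get_reg_offset(insn, regs,
 * REG_TYPE_RM) computes regno = 1 + 8 = 9 and returns
 * offsetof(struct pt_regs, r9), which regs_get_register() can then
 * use to fetch the saved value of %r9.
 */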
/*
 * Return the address being referenced by the instruction.
 * For ModRM.mod == 3, return the content of the rm register.
 * For ModRM.mod != 3, calculate the address using SIB and
 * displacement.
 */
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;

	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}
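/*
 * Worked example (illustrative only): for "bndcl 0x10(%rax,%rcx,4),
 * %bnd0", ModRM.mod != 3 and a SIB byte is present, so the address
 * checked against the bounds register is computed as:
 *
 *	addr = regs->ax + (regs->cx << 2) + 0x10;
 *
 * For the register form "bndcl %rdx, %bnd0" (ModRM.mod == 3), the
 * "address" is simply the content of %rdx.
 */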
static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail. If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction. If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read. This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction. Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;

	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu.
	 * Error out on anything else.
	 */
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}
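/*
 * For reference (encodings as documented in the Intel SDM; listed
 * here as an aid, not verified against every decoder corner case),
 * the bounds-checking instructions accepted above are:
 *
 *	bndcl:	F3 0F 1A	(check against lower bound)
 *	bndcu:	F2 0F 1A	(check against 1's complement upper bound)
 *	bndcn:	F2 0F 1B	(check against non-complemented upper bound)
 */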
/*
 * If a bounds overflow occurs then a #BR is generated. This
 * function decodes MPX instructions to get the violation address
 * and sets this address in the extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants in to the instructions. We can not
 * trust anything about it. They might not be valid
 * instructions or might encode invalid registers, etc...
 *
 * The caller is expected to kfree() the returned siginfo_t.
 */
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
				struct xsave_struct *xsave_buf)
{
	struct bndreg *bndregs, *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;
	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get the bndregs _area_ of the xsave structure */
	bndregs = get_xsave_addr(xsave_buf, XSTATE_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs[bndregno];

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode. Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}
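/*
 * Worked example for the 1's complement upper bound (the values are
 * made up): if a bndmk established the bounds [0x1000, 0x1fff], the
 * hardware stores lower_bound = 0x1000 and upper_bound = ~0x1fff =
 * 0xffffffffffffe000, so the ~ in the assignment above recovers
 * si_upper = 0x1fff.
 */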
static __user void *task_get_bounds_dir(struct task_struct *tsk)
{
	struct bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	fpu_save_init(&tsk->thread.fpu);
	bndcsr = get_xsave_addr(&tsk->thread.fpu.state->xsave, XSTATE_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}
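/*
 * Illustrative BNDCFGU decoding (register layout per the SDM; the
 * example value is made up): with bndcfgu == 0x00007f0040001001,
 * bit 0 (the enable flag) is set, bit 1 is the bndpreserve flag,
 * and masking with MPX_BNDCFG_ADDR_MASK yields the page-aligned
 * bounds directory base 0x00007f0040001000.
 */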
int mpx_enable_management(struct task_struct *tsk)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = tsk->mm;
	int ret = 0;

	/*
	 * The userspace runtime is responsible for allocating the
	 * bounds directory. It then saves the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enables MPX
	 * through the XRSTOR instruction.
	 *
	 * fpu_xsave() is expected to be very expensive. Storing the bounds
	 * directory here means that we do not have to do xsave in the unmap
	 * path; we can just use mm->bd_addr instead.
	 */
	bd_base = task_get_bounds_dir(tsk);
	down_write(&mm->mmap_sem);
	mm->bd_addr = bd_base;
	if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;

	up_write(&mm->mmap_sem);
	return ret;
}
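/*
 * Usage sketch (illustrative userspace code, not part of this file):
 * an MPX runtime first allocates the bounds directory and enables
 * MPX by loading BNDCFGU via xrstor, then asks the kernel to manage
 * bounds tables, assuming the PR_MPX_ENABLE_MANAGEMENT prctl is the
 * entry point that reaches this function:
 *
 *	prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0);
 */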
int mpx_disable_management(struct task_struct *tsk)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}
/*
 * In 32-bit mode, the bounds directory (MPX_BD_SIZE_BYTES) is 4MB
 * and each bounds table (MPX_BT_SIZE_BYTES) is 16KB. In 64-bit
 * mode, the bounds directory is 2GB and each bounds table is 4MB.
 */
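/*
 * Where those sizes come from (a sketch; entry counts per the SDM):
 * in 64-bit mode a bounds-table entry is four 8-byte words (lower
 * bound, upper bound, pointer value, reserved) = 32 bytes, and a
 * table holds 2^17 entries: 32 * 2^17 = 4MB. The directory holds
 * 2^28 8-byte entries: 8 * 2^28 = 2GB. The 32-bit numbers follow
 * the same way from 16-byte entries and smaller entry counts.
 */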
static int allocate_bt(long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(MPX_BT_SIZE_BYTES);
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bt_addr = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory. Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
					   expected_old_val, bt_addr);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails. Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry. Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set. Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to a bounds table.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	return 0;
out_unmap:
	vm_munmap(bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES);
	return ret;
}
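/*
 * Summary of the cmpxchg outcomes above (restating the logic, for
 * clarity):
 *
 *	fault (ret != 0)		-> unmap ours, return the fault
 *	old value == 0			-> our table is installed, return 0
 *	old value has VALID flag	-> lost the race, unmap ours, return 0
 *	anything else			-> corrupt entry, unmap ours, -EINVAL
 */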
/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory. If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * With 32-bit mode, the size of BD is 4MB, and the size of each
 * bound table is 16KB. With 64-bit mode, the size of BD is 2GB,
 * and the size of each bound table is 4MB.
 */
static int do_mpx_bt_fault(struct xsave_struct *xsave_buf)
{
	unsigned long bd_entry, bd_base;
	struct bndcsr *bndcsr;

	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + MPX_BD_SIZE_BYTES))
		return -EINVAL;

	return allocate_bt((long __user *)bd_entry);
}
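/*
 * Illustrative BNDSTATUS decoding (layout per the SDM; the example
 * value is made up): with bndstatus == 0x00007f0040123451, the low
 * two bits (EC == 1) report an invalid (not present) bounds
 * directory entry, and the masked address 0x00007f0040123450 is the
 * faulting directory entry handed to allocate_bt() above.
 */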
int mpx_handle_bd_fault(struct xsave_struct *xsave_buf)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	if (do_mpx_bt_fault(xsave_buf)) {
		force_sig(SIGSEGV, current);
		/*
		 * The force_sig() is essentially "handling" this
		 * exception, so we do not pass up the error
		 * from do_mpx_bt_fault().
		 */
	}
	return 0;
}
/*
 * A thin wrapper around get_user_pages(). Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;
	int force = 0;

	gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
				 nr_pages, write, force, NULL, NULL);
	/*
	 * get_user_pages() returns the number of pages gotten.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret > 0, success */
	return 0;
}
/*
 * Get the base of the bounds table pointed to by a specific bounds
 * directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
			long __user *bd_entry, unsigned long *bt_addr)
{
	int ret;
	int valid_bit;

	if (!access_ok(VERIFY_READ, (bd_entry), sizeof(*bd_entry)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user(*bt_addr, bd_entry);
		pagefault_enable();
		if (!ret)
			break;
		if (ret != -EFAULT)
			return ret;
		ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = *bt_addr & MPX_BD_ENTRY_VALID_FLAG;
	*bt_addr &= MPX_BT_ADDR_MASK;

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty. If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong. This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && *bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bt entry? That is OK. It
	 * just means there was no bounds table for this memory. Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!*bt_addr)
		return -ENOENT;

	return 0;
}
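/*
 * Sketch of how a user pointer 'addr' maps to these structures in
 * 64-bit mode (bit ranges per the SDM; the MPX_GET_*_OFFSET macros
 * used below encapsulate this): bits 20..47 of addr index the
 * bounds directory, which has 8-byte entries, so
 *
 *	bd_entry = mm->bd_addr + ((addr >> 20) & 0xfffffff) * 8;
 *
 * and bits 3..19 index the bounds table that entry points to,
 * which has 32-byte entries:
 *
 *	bt_entry = bt_addr + ((addr >> 3) & 0x1ffff) * 32;
 */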
/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static int zap_bt_entries(struct mm_struct *mm,
		unsigned long bt_addr,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;

	/*
	 * Find the first overlapping vma. If vma->vm_start > start, there
	 * will be a hole in the bounds table. This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;

	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split. So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here. If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error. This
		 * probably results in a SIGSEGV.
		 */
		if (!is_mpx_vma(vma))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len, NULL);

		vma = vma->vm_next;
		if (vma)
			addr = vma->vm_start;
	}

	return 0;
}
static int unmap_single_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long actual_old_val = 0;
	int ret;

	while (1) {
		int need_write = 1;

		pagefault_disable();
		ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
						   expected_old_val, 0);
		pagefault_enable();
		if (!ret)
			break;
		if (ret != -EFAULT)
			return ret;
		ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * There was no bounds table pointed to by the
		 * directory, so declare success. Somebody freed
		 * it.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry. We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated. Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}

	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() checks (via the VM_MPX flag)
	 * whether it is unmapping a bounds table.
	 */
	return do_munmap(mm, bt_addr, MPX_BT_SIZE_BYTES);
}
/*
 * If the bounds table pointed to by bounds directory 'bd_entry' is
 * not shared, unmap this whole bounds table. Otherwise, only free
 * the backing physical pages of the bounds table entries covered
 * by this virtual address region start...end.
 */
static int unmap_shared_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long start,
		unsigned long end, bool prev_shared, bool next_shared)
{
	unsigned long bt_addr;
	int ret;

	ret = get_bt_addr(mm, bd_entry, &bt_addr);
	/*
	 * We could see an "error" ret for not-present bounds
	 * tables (not really an error), or actual errors, but
	 * stop unmapping either way.
	 */
	if (ret)
		return ret;

	if (prev_shared && next_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
	else if (prev_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
				bt_addr+MPX_BT_SIZE_BYTES);
	else if (next_shared)
		ret = zap_bt_entries(mm, bt_addr, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
	else
		ret = unmap_single_bt(mm, bd_entry, bt_addr);

	return ret;
}
/*
 * A virtual address region being munmap()ed might share a bounds
 * table with adjacent VMAs. We only need to free the backing
 * physical memory of the shared bounds table entries covered by
 * this virtual address region start...end.
 */
static int unmap_edge_bts(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	int ret;
	long __user *bde_start, *bde_end;
	struct vm_area_struct *prev, *next;
	bool prev_shared = false, next_shared = false;

	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);

	/*
	 * Check whether bde_start and bde_end are shared with adjacent
	 * VMAs.
	 *
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	if (prev && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(prev->vm_end-1))
			== bde_start)
		prev_shared = true;
	if (next && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(next->vm_start))
			== bde_end)
		next_shared = true;

	/*
	 * This virtual address region being munmap()ed is only
	 * covered by one bounds table.
	 *
	 * In this case, if this table is also shared with adjacent
	 * VMAs, only part of the backing physical memory of the bounds
	 * table needs to be freed. Otherwise the whole bounds table
	 * needs to be unmapped.
	 */
	if (bde_start == bde_end) {
		return unmap_shared_bt(mm, bde_start, start, end,
				prev_shared, next_shared);
	}

	/*
	 * If more than one bounds table is covered by this virtual
	 * address region being munmap()ed, we need to separately check
	 * whether bde_start and bde_end are shared with adjacent VMAs.
	 */
	ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false);
	if (ret)
		return ret;
	ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared);
	if (ret)
		return ret;

	return 0;
}
static int mpx_unmap_tables(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	int ret;
	long __user *bd_entry, *bde_start, *bde_end;
	unsigned long bt_addr;

	/*
	 * "Edge" bounds tables are those which are being used by the region
	 * (start -> end), but that may be shared with adjacent areas. If they
	 * turn out to be completely unshared, they will be freed. If they are
	 * shared, we will free the backing store (like an MADV_DONTNEED) for
	 * areas used by this region.
	 */
	ret = unmap_edge_bts(mm, start, end);
	switch (ret) {
	/* non-present tables are OK */
	case 0:
	case -ENOENT:
		/* Success, or no tables to unmap */
		break;
	case -EINVAL:
	case -EFAULT:
	default:
		return ret;
	}

	/*
	 * Only unmap the bounds tables that are
	 *   1. fully covered by this region, and
	 *   2. not at the edges of the mapping, even if fully aligned.
	 */
	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
	for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) {
		ret = get_bt_addr(mm, bd_entry, &bt_addr);
		switch (ret) {
		case 0:
			break;
		case -ENOENT:
			/* No table here, try the next one */
			continue;
		case -EINVAL:
		case -EFAULT:
		default:
			/*
			 * Note: we are being strict here.
			 * Any time we run in to an issue
			 * unmapping tables, we stop and
			 * SIGSEGV.
			 */
			return ret;
		}
		ret = unmap_single_bt(mm, bd_entry, bt_addr);
		if (ret == -EINVAL)
			return ret;
	}

	return 0;
}
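/*
 * Illustrative picture of the two phases above. Suppose the
 * munmap()ed region spans four bounds tables (sizes not to scale):
 *
 *	bounds tables:	[ BT0 ][ BT1 ][ BT2 ][ BT3 ]
 *	unmap range:	    ^start              end^
 *
 * BT0 and BT3 are the "edge" tables and may be shared with
 * neighboring VMAs, so unmap_edge_bts() handles them. BT1 and BT2
 * are fully covered and are unmapped wholesale by the loop above.
 */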
/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed. Assume end > start.
 *
 * This function will be called by do_munmap(), and the VMAs covering
 * the virtual address region start...end have already been split if
 * necessary, and the 'vma' is the first vma in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start->end), we will not continue follow-up work. This
	 * recursion represents having bounds tables for bounds tables,
	 * which should not occur normally. Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}