/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rbtree.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/pgtable.h>
#include <asm/fcntl.h>

#include "pat_internal.h"
#ifdef CONFIG_X86_PAT
int __read_mostly pat_enabled = 1;

static inline void pat_disable(const char *reason)
	printk(KERN_INFO "%s\n", reason);

static int __init nopat(char *str)
	pat_disable("PAT support disabled.");
early_param("nopat", nopat);
#else
static inline void pat_disable(const char *reason)
#endif

static int __init pat_debug_setup(char *str)
__setup("debugpat", pat_debug_setup);
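
/*
 * The "nopat" and "debugpat" boot options above disable PAT support and
 * enable the dprintk() debug output used throughout this file (see
 * pat_internal.h), respectively.
 */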
static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x, y)	((u64)PAT_ ## y << ((x)*8))
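
/*
 * PAT(x, y) places the 8-bit encoding of memory type y into entry x of the
 * IA32_PAT MSR image, e.g. PAT(1, WC) expands to (u64)1 << 8.
 */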
	bool boot_cpu = !boot_pat_state;

	if (!boot_pat_state) {
		pat_disable("PAT not supported by CPU.");
	} else {
		/*
		 * If this happens we are on a secondary CPU, but
		 * switched to PAT on the boot CPU. We have no way to
		 * undo PAT.
		 */
		printk(KERN_ERR "PAT enabled, but not supported by secondary CPU\n");
	}
	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux (bits are PAT, PCD, PWT):
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 */
	pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
	      PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
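	/*
	 * With the encodings above this writes IA32_PAT = 0x0007010600070106,
	 * i.e. entries 0-3 are WB, WC, UC-, UC and entries 4-7 repeat them.
	 */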
	rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);

	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype accesses */

/*
 * Computes the intersection of the PAT memory type and the MTRR memory type
 * and returns the resulting memory type as PAT understands it.
 * (The PAT and MTRR types do not use the same numeric values.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
	/*
	 * Look for MTRR hint to get the effective type in case where PAT
	 * request is for WB.
	 */
	if (req_type == _PAGE_CACHE_WB) {
		mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type != MTRR_TYPE_WRBACK)
			return _PAGE_CACHE_UC_MINUS;

		return _PAGE_CACHE_WB;
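	/*
	 * E.g. a WB request over a range the MTRRs mark UC or WC is
	 * degraded to UC- here.
	 */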
struct pagerange_state {
	unsigned long	cur_pfn;
	int		ram;
	int		not_ram;
};

static int
pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
	struct pagerange_state *state = arg;

	state->not_ram	|= initial_pfn > state->cur_pfn;
	state->ram	|= total_nr_pages > 0;
	state->cur_pfn	 = initial_pfn + total_nr_pages;

	return state->ram && state->not_ram;
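
/*
 * pat_pagerange_is_ram() below returns 1 if the whole range is RAM, 0 if
 * none of it is, and -1 if the range mixes RAM and non-RAM pages (the
 * callback above bails out as soon as it has seen both).
 */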
static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
	int ret = 0;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct pagerange_state state = {start_pfn, 0, 0};

	/*
	 * For legacy reasons, the physical address range in the legacy ISA
	 * region is tracked as non-RAM. This allows users of /dev/mem to map
	 * portions of the legacy ISA region, even when some of those portions
	 * are listed (or not even listed) with different e820 types
	 * (RAM/reserved/...).
	 */
	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;

	if (start_pfn < end_pfn) {
		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
				&state, pagerange_is_ram_callback);
	}

	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
/*
 * For RAM pages, we use page flags to mark the pages with appropriate type.
 * Here we do two passes:
 * - Find the memtype of all the pages in the range, look for any conflicts
 * - In case of no conflicts, set the new memtype for pages in the range
 */
static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
				  unsigned long *new_type)
	if (req_type == _PAGE_CACHE_UC) {
		/* We do not support strong UC */
		req_type = _PAGE_CACHE_UC_MINUS;
	}

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		type = get_page_memtype(page);

		printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
		       start, end - 1, type, req_type);
	}

	*new_type = req_type;

	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, req_type);
	}
static int free_ram_pages_type(u64 start, u64 end)
	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
		page = pfn_to_page(pfn);
		set_page_memtype(page, -1);
	}
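
/*
 * Two tracking schemes are used below: RAM pages carry their memtype in the
 * struct page flags via get/set_page_memtype(), while non-RAM ranges are
 * kept in an rbtree (rbt_memtype_*) protected by memtype_lock.
 */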
/*
 * req_type typically has one of the following values:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * If new_type is NULL, the function returns an error if it cannot reserve the
 * region with req_type. If new_type is non-NULL, the function returns the
 * available type in *new_type when there is no error. On any error it
 * returns a negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		    unsigned long *new_type)
	unsigned long actual_type;

	BUG_ON(start >= end); /* end is exclusive */

	/* This is identical to page table setting without PAT */
	if (req_type == _PAGE_CACHE_WC)
		*new_type = _PAGE_CACHE_UC_MINUS;
	else
		*new_type = req_type & _PAGE_CACHE_MASK;
	/* Low ISA region is always mapped WB in page table. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end)) {
		*new_type = _PAGE_CACHE_WB;

	/*
	 * Call mtrr_lookup to get the type hint. This is an
	 * optimization for /dev/mem mmap'ers into WB memory (BIOS
	 * tools and ACPI tools). Use a WB request for WB memory and use
	 * UC_MINUS otherwise.
	 */
	actual_type = pat_x_mtrr_type(start, end, req_type & _PAGE_CACHE_MASK);

	*new_type = actual_type;

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = reserve_ram_pages_type(start, end, req_type, new_type);
	} else if (is_range_ram < 0) {

	new = kzalloc(sizeof(struct memtype), GFP_KERNEL);

	new->type = actual_type;

	spin_lock(&memtype_lock);

	err = rbt_memtype_check_insert(new, new_type);
	if (err) {
		printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
		       start, end - 1,
		       cattr_name(new->type), cattr_name(req_type));
		spin_unlock(&memtype_lock);
		return err;
	}

	spin_unlock(&memtype_lock);

	dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
		start, end - 1, cattr_name(new->type), cattr_name(req_type),
		new_type ? cattr_name(*new_type) : "-");
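
/* Every successful reserve_memtype() is expected to be balanced by a free_memtype() call. */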
int free_memtype(u64 start, u64 end)
	struct memtype *entry;

	/* Low ISA region is always mapped WB. No need to track */
	if (x86_platform.is_untracked_pat_range(start, end))

	is_range_ram = pat_pagerange_is_ram(start, end);
	if (is_range_ram == 1) {
		err = free_ram_pages_type(start, end);
	} else if (is_range_ram < 0) {

	spin_lock(&memtype_lock);
	entry = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
	       current->comm, current->pid, start, end - 1);

	dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
/**
 * lookup_memtype - Looks up the memory type for a physical address
 * @paddr: physical address whose memory type needs to be looked up
 *
 * Only to be called when PAT is enabled.
 *
 * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
 * or _PAGE_CACHE_MODE_UC
 */
static enum page_cache_mode lookup_memtype(u64 paddr)
	enum page_cache_mode rettype = _PAGE_CACHE_MODE_WB;
	struct memtype *entry;

	if (x86_platform.is_untracked_pat_range(paddr, paddr + PAGE_SIZE))

	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
		page = pfn_to_page(paddr >> PAGE_SHIFT);
		rettype = pgprot2cachemode(__pgprot(get_page_memtype(page)));
		/*
		 * -1 from get_page_memtype() implies RAM page is in its
		 * default state and not reserved, and hence of type WB
		 */
		rettype = _PAGE_CACHE_MODE_WB;

	spin_lock(&memtype_lock);

	entry = rbt_memtype_lookup(paddr);
	if (entry != NULL)
		rettype = pgprot2cachemode(__pgprot(entry->type));
	else
		rettype = _PAGE_CACHE_MODE_UC_MINUS;

	spin_unlock(&memtype_lock);
/**
 * io_reserve_memtype - Request a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 * @type: A pointer to memtype, with the requested type. On success, the
 * requested type or any other compatible type available for the region is
 * returned here.
 *
 * On success, returns 0
 * On failure, returns non-zero
 */
int io_reserve_memtype(resource_size_t start, resource_size_t end,
			enum page_cache_mode *type)
	resource_size_t size = end - start;
	enum page_cache_mode req_type = *type;
	enum page_cache_mode new_type;
	unsigned long new_prot;

	WARN_ON_ONCE(iomem_map_sanity_check(start, size));

	ret = reserve_memtype(start, end, cachemode2protval(req_type),
			      &new_prot);

	new_type = pgprot2cachemode(__pgprot(new_prot));

	if (!is_new_memtype_allowed(start, size, req_type, new_type))

	if (kernel_map_sync_memtype(start, size, new_type) < 0)

	free_memtype(start, end);
/**
 * io_free_memtype - Release a memory type mapping for a region of memory
 * @start: start (physical address) of the region
 * @end: end (physical address) of the region
 */
void io_free_memtype(resource_size_t start, resource_size_t end)
{
	free_memtype(start, end);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
#ifdef CONFIG_STRICT_DEVMEM
/* This check is done in drivers/char/mem.c in case of STRICT_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
/* This check is needed to avoid cache aliasing when PAT is enabled */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
			       current->comm, from, to - 1);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
#endif /* CONFIG_STRICT_DEVMEM */
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
	unsigned long flags = _PAGE_CACHE_WB;

	if (!range_is_allowed(pfn, size))

	if (file->f_flags & O_DSYNC)
		flags = _PAGE_CACHE_UC_MINUS;

	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_enabled &&
	    !(boot_cpu_has(X86_FEATURE_MTRR) ||
	      boot_cpu_has(X86_FEATURE_K6_MTRR) ||
	      boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
	      boot_cpu_has(X86_FEATURE_CENTAUR_MCR)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
/*
 * Change the memory type for the physical address range in kernel identity
 * mapping space if that range is a part of identity map.
 */
int kernel_map_sync_memtype(u64 base, unsigned long size,
			    enum page_cache_mode pcm)
	if (base > __pa(high_memory-1))

	/*
	 * some areas in the middle of the kernel identity range
	 * are not mapped, like the PCI space.
	 */
	if (!page_is_ram(base >> PAGE_SHIFT))

	id_sz = (__pa(high_memory-1) <= base + size) ?
				__pa(high_memory) - base :
				size;

	if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
		printk(KERN_INFO "%s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
			current->comm, current->pid,
			cattr_name(cachemode2protval(pcm)),
			base, (unsigned long long)(base + size-1));
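
/*
 * kernel_map_sync_memtype() is used by io_reserve_memtype() above and by
 * reserve_pfn_range() below to keep the kernel identity mapping's cache
 * attributes consistent with the newly reserved memtype.
 */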
/*
 * Internal interface to reserve a range of physical memory with prot.
 * Reserves non-RAM regions only; after a successful reserve_memtype(), this
 * function also keeps the identity mapping (if any) in sync with the new prot.
 */
static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
			     int strict_prot)
	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
	unsigned long flags = want_flags;

	is_ram = pat_pagerange_is_ram(paddr, paddr + size);

	/*
	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
	 * track of number of mappings of RAM pages. We can assert that
	 * the type requested matches the type of first page in the range.
	 */
	flags = cachemode2protval(lookup_memtype(paddr));
	if (want_flags != flags) {
		printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
		       current->comm, current->pid,
		       cattr_name(want_flags),
		       (unsigned long long)paddr,
		       (unsigned long long)(paddr + size - 1),
		       cattr_name(flags));
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);

	if (flags != want_flags) {
		if (strict_prot ||
		    !is_new_memtype_allowed(paddr, size,
					    pgprot2cachemode(__pgprot(want_flags)),
					    pgprot2cachemode(__pgprot(flags)))) {
			free_memtype(paddr, paddr + size);
			printk(KERN_ERR "%s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
			       current->comm, current->pid,
			       cattr_name(want_flags),
			       (unsigned long long)paddr,
			       (unsigned long long)(paddr + size - 1),
			       cattr_name(flags));
			return -EINVAL;
		}
		/*
		 * We allow returning a different type than the one requested
		 * in the non-strict case.
		 */
		*vma_prot = __pgprot((pgprot_val(*vma_prot) &
				      (~_PAGE_CACHE_MASK)) |
				     flags);
	}

	if (kernel_map_sync_memtype(paddr, size,
				    pgprot2cachemode(__pgprot(flags))) < 0) {
		free_memtype(paddr, paddr + size);
/*
 * Internal interface to free a range of physical memory.
 * Frees non-RAM regions only.
 */
static void free_pfn_range(u64 paddr, unsigned long size)
	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
	if (is_ram == 0)
		free_memtype(paddr, paddr + size);
/*
 * track_pfn_copy is called when a vma that covers a pfnmap gets copied
 * through copy_page_range().
 *
 * If the vma has a linear pfn mapping for the entire range, we get the prot
 * from the pte and reserve the entire vma range with a single
 * reserve_pfn_range() call.
 */
int track_pfn_copy(struct vm_area_struct *vma)
	resource_size_t paddr;
	unsigned long vma_size = vma->vm_end - vma->vm_start;

	if (vma->vm_flags & VM_PAT) {
		/*
		 * reserve the whole chunk covered by vma. We need the
		 * starting address and protection from pte.
		 */
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			return -EINVAL;
		}
		pgprot = __pgprot(prot);
		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
	}
/*
 * prot is passed in as a parameter for the new mapping. If the vma has a
 * linear pfn mapping for the entire range, reserve the entire vma range with
 * a single reserve_pfn_range() call.
 */
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
		    unsigned long pfn, unsigned long addr, unsigned long size)
	resource_size_t paddr = (resource_size_t)pfn << PAGE_SHIFT;
	enum page_cache_mode pcm;

	/* reserve the whole chunk starting from paddr */
	if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
		ret = reserve_pfn_range(paddr, size, prot, 0);
		if (!ret)
			vma->vm_flags |= VM_PAT;
		return ret;
	}

	/*
	 * For anything smaller than the vma size we set prot based on the
	 * lookup.
	 */
	pcm = lookup_memtype(paddr);

	/* Check memtype for the remaining pages */
	while (size > PAGE_SIZE) {
		size -= PAGE_SIZE;
		paddr += PAGE_SIZE;
		if (pcm != lookup_memtype(paddr))
			return -EINVAL;
	}

	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
		     unsigned long pfn)
	enum page_cache_mode pcm;

	/* Set prot based on lookup */
	pcm = lookup_memtype((resource_size_t)pfn << PAGE_SHIFT);
	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
			 cachemode2protval(pcm));
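
/*
 * Unlike track_pfn_remap() above, track_pfn_insert() (the vm_insert_pfn()
 * path) only derives the pgprot from the existing memtype; it does not take
 * a reservation of its own.
 */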
/*
 * untrack_pfn is called while unmapping a pfnmap for a region.
 * untrack can be called for a specific region indicated by pfn and size or
 * can be for the entire vma (in which case pfn, size are zero).
 */
void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
		 unsigned long size)
	resource_size_t paddr;

	if (!(vma->vm_flags & VM_PAT))
		return;

	/* free the chunk starting from pfn or the whole chunk */
	paddr = (resource_size_t)pfn << PAGE_SHIFT;
	if (!paddr && !size) {
		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			return;
		}
		size = vma->vm_end - vma->vm_start;
	}
	free_pfn_range(paddr, size);
	vma->vm_flags &= ~VM_PAT;
pgprot_t pgprot_writecombine(pgprot_t prot)
{
	if (pat_enabled)
		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
	else
		return pgprot_noncached(prot);
}
EXPORT_SYMBOL_GPL(pgprot_writecombine);
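
/*
 * Drivers typically apply this to a mapping's page protection, e.g.
 * vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot), to get
 * write-combined access to framebuffer-style I/O memory.
 */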
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)

static struct memtype *memtype_get_idx(loff_t pos)
	struct memtype *print_entry;

	print_entry = kzalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!print_entry)
		return NULL;

	spin_lock(&memtype_lock);
	ret = rbt_memtype_copy_nth_element(print_entry, pos);
	spin_unlock(&memtype_lock);

static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
	seq_printf(seq, "PAT memtype list:\n");
	return memtype_get_idx(*pos);

static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	return memtype_get_idx(*pos);

static void memtype_seq_stop(struct seq_file *seq, void *v)

static int memtype_seq_show(struct seq_file *seq, void *v)
	struct memtype *print_entry = (struct memtype *)v;

	seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
			print_entry->start, print_entry->end);

static const struct seq_operations memtype_seq_ops = {
	.start = memtype_seq_start,
	.next  = memtype_seq_next,
	.stop  = memtype_seq_stop,
	.show  = memtype_seq_show,
};

static int memtype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &memtype_seq_ops);
}

static const struct file_operations memtype_fops = {
	.open    = memtype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static int __init pat_memtype_list_init(void)
	debugfs_create_file("pat_memtype_list", S_IRUSR,
			    arch_debugfs_dir, NULL, &memtype_fops);
	return 0;

late_initcall(pat_memtype_list_init);

#endif /* CONFIG_DEBUG_FS && CONFIG_X86_PAT */