/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
			  (unsigned long int)phys_addr,
			  (unsigned long int)last_addr);
		return NULL;
	}

	/* If it could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 cachemode2protval(pcm), &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	new_pcm = pgprot2cachemode(__pgprot(new_prot_val));

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}
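
/*
 * As the NOTE above says, callers may pass a non-page-aligned address.
 * A minimal sketch of what that means in practice (illustrative values,
 * not taken from this file): mapping 8 bytes at physical 0xfed00004
 * produces a page-aligned mapping of 0xfed00000 and adds the sub-page
 * offset back in, so the returned pointer references 0xfed00004 directly:
 *
 *	void __iomem *p = ioremap_nocache(0xfed00004, 8);
 *	// p points 0x4 bytes into the underlying page-aligned mapping
 */
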
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
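
/*
 * A minimal usage sketch (hypothetical PCI driver; "pdev", BAR 0 and the
 * CTRL register offset are assumptions, not part of this file):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL);		// MMIO access via the helpers
 *	iounmap(regs);			// must be freed with iounmap
 */
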
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
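
/*
 * Write combining is typically used for framebuffer-style memory where
 * bursts of writes dominate. A sketch under those assumptions ("fb_phys"
 * and "fb_len" are hypothetical values):
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 * Note the fallback above: without PAT, the request silently degrades to
 * an uncached mapping via ioremap_nocache().
 */
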
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);
	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
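
/*
 * Note the ISA special case mirrored from __ioremap_caller(): remapping
 * the legacy range returns a pointer into the existing kernel mapping,
 * and iounmap() of such a pointer is a no-op. A sketch (0xb8000, the VGA
 * text buffer, is just an illustrative legacy address):
 *
 *	void __iomem *vga = ioremap_nocache(0xb8000, 0x1000);
 *	...
 *	iounmap(vga);	// returns early; no vm_area was ever created
 */
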
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
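
/*
 * These two helpers are meant to be used as a pair, e.g. by the /dev/mem
 * read/write paths. A minimal sketch (error handling elided; "buf" and
 * "count" are assumed to come from the caller):
 *
 *	void *ptr = xlate_dev_mem_ptr(phys);
 *	if (ptr) {
 *		memcpy(buf, ptr, count);
 *		unxlate_dev_mem_ptr(phys, ptr);
 *	}
 */
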
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
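
/*
 * A sketch of how a boot-time fixmap slot is set up and torn down with
 * this helper (the slot, flags and "table_phys" address are illustrative;
 * real callers normally go through the generic early_ioremap() machinery):
 *
 *	__early_set_fixmap(FIX_BTMAP_BEGIN, table_phys, PAGE_KERNEL);
 *	// ... access the data through fix_to_virt(FIX_BTMAP_BEGIN) ...
 *	__early_set_fixmap(FIX_BTMAP_BEGIN, 0, __pgprot(0));
 */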