/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map) {
                x -= __START_KERNEL_map;
                VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
                x += phys_base;
        } else {
                VIRTUAL_BUG_ON(x < PAGE_OFFSET);
                x -= PAGE_OFFSET;
                VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
                                        !phys_addr_valid(x));
        }
        return x;
}
EXPORT_SYMBOL(__phys_addr);

bool __virt_addr_valid(unsigned long x)
{
        if (x >= __START_KERNEL_map) {
                x -= __START_KERNEL_map;
                if (x >= KERNEL_IMAGE_SIZE)
                        return false;
                x += phys_base;
        } else {
                if (x < PAGE_OFFSET)
                        return false;
                x -= PAGE_OFFSET;
                if (system_state == SYSTEM_BOOTING ?
                                x > MAXMEM : !phys_addr_valid(x)) {
                        return false;
                }
        }

        return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
        /* VMALLOC_* aren't constants; not available at boot time */
        VIRTUAL_BUG_ON(x < PAGE_OFFSET);
        VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
                is_vmalloc_addr((void *) x));
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
        if (x < PAGE_OFFSET)
                return false;
        if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
                return false;
        return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * The first 4KB of memory is a special case: it is a BIOS-owned
         * area, not kernel RAM, but it is generally not listed as such
         * in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640K->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

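/*
 * Classify the physical range [start, end): returns 1 if every page in the
 * range is RAM, 0 if none of it is, and -1 as soon as the range turns out
 * to mix RAM and non-RAM pages.
 */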
int pagerange_is_ram(unsigned long start, unsigned long end)
{
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                if (page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_rampage = 1;

                if (ram_page == not_rampage)
                        return -1;
        }

        return ram_page;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

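/*
 * Illustrative sketch only (not used by this file): the PAT code keeps the
 * kernel's direct-map alias of a range in sync in roughly this way, where
 * "phys" and "size" are hypothetical page-aligned values:
 *
 *      if (ioremap_change_attr((unsigned long)__va(phys), size,
 *                              _PAGE_CACHE_UC) < 0)
 *              printk(KERN_WARNING "could not sync kernel mapping\n");
 */
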
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
                                pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types for certain
                 * requested types:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL_IO;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
}

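/*
 * Worked example of the alignment fixup above, with illustrative numbers:
 * a request for phys_addr = 0xfed00004, size = 0x30 yields
 *
 *      offset    = 0xfed00004 & ~PAGE_MASK                 = 0x004
 *      phys_addr = 0xfed00004 & PAGE_MASK                  = 0xfed00000
 *      size      = PAGE_ALIGN(0xfed00033 + 1) - 0xfed00000 = 0x1000
 *
 * so a single page is mapped and the caller gets vaddr + 0x004 back,
 * covering exactly the bytes that were requested.
 */
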
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It is useful when some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

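/*
 * Minimal usage sketch for the nocache variant; MY_MMIO_BASE, MY_MMIO_LEN
 * and MY_CTRL_REG are hypothetical placeholders, not real symbols:
 *
 *      void __iomem *regs = ioremap_nocache(MY_MMIO_BASE, MY_MMIO_LEN);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + MY_CTRL_REG);
 *      iounmap(regs);
 */
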
/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

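/*
 * Hedged sketch of a write-combining mapping, e.g. for a frame buffer
 * aperture; "fb_phys" and "fb_len" are placeholders:
 *
 *      void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 *      if (fb) {
 *              memset_io(fb, 0, fb_len);
 *              iounmap(fb);
 *      }
 *
 * When PAT is disabled this silently degrades to the uncached mapping
 * returned by ioremap_nocache(), as implemented above.
 */
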
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
                                        unsigned long size)
{
        unsigned long flags;
        void *ret;
        int err;

        /*
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
        if (err < 0)
                return NULL;

        ret = (void *) __ioremap_caller(phys_addr, size, flags,
                                        __builtin_return_address(0));

        free_memtype(phys_addr, phys_addr + size);
        return (void __iomem *)ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space.   So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

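/*
 * Usage note: pass back exactly the pointer returned by one of the
 * ioremap_*() helpers above, offset and all ("phys" and "len" are
 * placeholders):
 *
 *      void __iomem *p = ioremap_nocache(phys, len);
 *
 *      if (p) {
 *              (void) readl(p);
 *              iounmap(p);
 *      }
 *
 * Pointers into the low PCI/ISA range, which __ioremap_caller() returns
 * without creating a vm_area, are recognised above and quietly ignored.
 */
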
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}

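/*
 * Hedged sketch of how the /dev/mem read path (drivers/char/mem.c) uses
 * the two helpers above; "p" is the physical address being read, "buf"
 * and "sz" are the usual read() arguments:
 *
 *      ptr = xlate_dev_mem_ptr(p);
 *      if (!ptr)
 *              return -EFAULT;
 *      if (copy_to_user(buf, ptr, sz)) {
 *              unxlate_dev_mem_ptr(p, ptr);
 *              return -EFAULT;
 *      }
 *      unxlate_dev_mem_ptr(p, ptr);
 */
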
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys, pgprot_t prot)
{
        if (after_paging_init)
                __set_fixmap(idx, phys, prot);
        else
                __early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

static void *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
static int __init check_early_ioremap_leak(void)
{
        int count = 0;
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                if (prev_map[i])
                        count++;

        if (!count)
                return 0;
        WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
                count);
        printk(KERN_WARNING
                "please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

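/*
 * Early (pre-vmalloc) remapping: up to FIX_BTMAPS_SLOTS mappings can be
 * live at once, each backed by a block of NR_FIX_BTMAPS fixmap pages taken
 * from the FIX_BTMAP_BEGIN..FIX_BTMAP_END range.  prev_map[] and
 * prev_size[] above record which slots are in use so that early_iounmap()
 * can find the mapping and tear it down again.
 */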
static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
        unsigned long offset, last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx0, idx;
        int i, slot;

        WARN_ON(system_state != SYSTEM_BOOTING);

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (!prev_map[i]) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx): no free slot found\n",
                         phys_addr, size);
                WARN_ON(1);
                return NULL;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, slot);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        prev_map[slot] = (void *) (offset + fix_to_virt(idx0));
        return prev_map[slot];
}

/* Remap an IO device */
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init *early_memremap(unsigned long phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

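/*
 * Typical early-boot usage sketch; "phys" and "len" describe some firmware
 * table and are placeholders.  This only works while system_state is
 * SYSTEM_BOOTING:
 *
 *      void *tbl = early_memremap(phys, len);
 *
 *      if (tbl) {
 *              ... parse the table ...
 *              early_iounmap(tbl, len);
 *      }
 */
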
void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int i, slot;

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i] == addr) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_iounmap(%p, %08lx): slot not found\n",
                         addr, size);
                WARN_ON(1);
                return;
        }

        if (prev_size[slot] != size) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: size does not match mapped size %08lx\n",
                         addr, size, slot, prev_size[slot]);
                WARN_ON(1);
                return;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, slot);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
        prev_map[slot] = NULL;
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}