powerpc: create kernel/setup.h
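
The new common header itself is not part of this blobdiff, which only shows the setup_64.c side. As a minimal, hypothetical sketch (the contents below are inferred from symbols this diff touches, not taken from the real file), such a private arch/powerpc/kernel/setup.h would collect prototypes that the 64-bit and, presumably, the 32-bit setup code previously redeclared as ad-hoc externs:

/* Hypothetical sketch only; the actual header is not shown in this blobdiff. */
#ifndef _POWERPC_KERNEL_SETUP_H
#define _POWERPC_KERNEL_SETUP_H

/* check_for_initrd() is dropped from setup_64.c in this diff, so a shared
 * prototype like this would let the common setup code keep calling it. */
void check_for_initrd(void);

#endif /* _POWERPC_KERNEL_SETUP_H */
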
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 950e6f0fea985b1d3659f9e2c492025f72947441..fe39aac4f24dc76bd3cb44198c8161dcc4b84a04 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -41,7 +41,6 @@
 #include <asm/elf.h>
 #include <asm/machdep.h>
 #include <asm/paca.h>
-#include <asm/ppcdebug.h>
 #include <asm/time.h>
 #include <asm/cputable.h>
 #include <asm/sections.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/lmb.h>
-#include <asm/iSeries/ItLpNaca.h>
+#include <asm/iseries/it_lp_naca.h>
 #include <asm/firmware.h>
 #include <asm/systemcfg.h>
+#include <asm/xmon.h>
+#include <asm/udbg.h>
+
+#include "setup.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -102,8 +105,6 @@ extern void htab_initialize(void);
 extern void early_init_devtree(void *flat_dt);
 extern void unflatten_device_tree(void);
 
-extern void smp_release_cpus(void);
-
 int have_of = 1;
 int boot_cpuid = 0;
 int boot_cpuid_phys = 0;
@@ -182,120 +183,14 @@ static int __init early_smt_enabled(char *p)
 }
 early_param("smt-enabled", early_smt_enabled);
 
-/**
- * setup_cpu_maps - initialize the following cpu maps:
- *                  cpu_possible_map
- *                  cpu_present_map
- *                  cpu_sibling_map
- *
- * Having the possible map set up early allows us to restrict allocations
- * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
- *
- * We do not initialize the online map here; cpus set their own bits in
- * cpu_online_map as they come up.
- *
- * This function is valid only for Open Firmware systems.  finish_device_tree
- * must be called before using this.
- *
- * While we're here, we may as well set the "physical" cpu ids in the paca.
- */
-static void __init setup_cpu_maps(void)
-{
-       struct device_node *dn = NULL;
-       int cpu = 0;
-       int swap_cpuid = 0;
-
-       check_smt_enabled();
-
-       while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
-               u32 *intserv;
-               int j, len = sizeof(u32), nthreads;
-
-               intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
-                                             &len);
-               if (!intserv)
-                       intserv = (u32 *)get_property(dn, "reg", NULL);
-
-               nthreads = len / sizeof(u32);
-
-               for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
-                       cpu_set(cpu, cpu_present_map);
-                       set_hard_smp_processor_id(cpu, intserv[j]);
-
-                       if (intserv[j] == boot_cpuid_phys)
-                               swap_cpuid = cpu;
-                       cpu_set(cpu, cpu_possible_map);
-                       cpu++;
-               }
-       }
-
-       /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
-        * boot cpu is logical 0.
-        */
-       if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
-               u32 tmp;
-               tmp = get_hard_smp_processor_id(0);
-               set_hard_smp_processor_id(0, boot_cpuid_phys);
-               set_hard_smp_processor_id(swap_cpuid, tmp);
-       }
-
-       /*
-        * On pSeries LPAR, we need to know how many cpus
-        * could possibly be added to this partition.
-        */
-       if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
-                               (dn = of_find_node_by_path("/rtas"))) {
-               int num_addr_cell, num_size_cell, maxcpus;
-               unsigned int *ireg;
-
-               num_addr_cell = prom_n_addr_cells(dn);
-               num_size_cell = prom_n_size_cells(dn);
-
-               ireg = (unsigned int *)
-                       get_property(dn, "ibm,lrdr-capacity", NULL);
-
-               if (!ireg)
-                       goto out;
-
-               maxcpus = ireg[num_addr_cell + num_size_cell];
-
-               /* Double maxcpus for processors which have SMT capability */
-               if (cpu_has_feature(CPU_FTR_SMT))
-                       maxcpus *= 2;
-
-               if (maxcpus > NR_CPUS) {
-                       printk(KERN_WARNING
-                              "Partition configured for %d cpus, "
-                              "operating system maximum is %d.\n",
-                              maxcpus, NR_CPUS);
-                       maxcpus = NR_CPUS;
-               } else
-                       printk(KERN_INFO "Partition configured for %d cpus.\n",
-                              maxcpus);
-
-               for (cpu = 0; cpu < maxcpus; cpu++)
-                       cpu_set(cpu, cpu_possible_map);
-       out:
-               of_node_put(dn);
-       }
-
-       /*
-        * Do the sibling map; assume only two threads per processor.
-        */
-       for_each_cpu(cpu) {
-               cpu_set(cpu, cpu_sibling_map[cpu]);
-               if (cpu_has_feature(CPU_FTR_SMT))
-                       cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
-       }
-
-       systemcfg->processorCount = num_present_cpus();
-}
+#else
+#define check_smt_enabled()
 #endif /* CONFIG_SMP */
 
 extern struct machdep_calls pSeries_md;
 extern struct machdep_calls pmac_md;
 extern struct machdep_calls maple_md;
-extern struct machdep_calls bpa_md;
+extern struct machdep_calls cell_md;
 extern struct machdep_calls iseries_md;
 
 /* Ultimately, stuff them in an elf section like initcalls... */
@@ -309,8 +204,8 @@ static struct machdep_calls __initdata *machines[] = {
 #ifdef CONFIG_PPC_MAPLE
        &maple_md,
 #endif /* CONFIG_PPC_MAPLE */
-#ifdef CONFIG_PPC_BPA
-       &bpa_md,
+#ifdef CONFIG_PPC_CELL
+       &cell_md,
 #endif
 #ifdef CONFIG_PPC_ISERIES
        &iseries_md,
@@ -350,12 +245,6 @@ void __init early_setup(unsigned long dt_ptr)
 
        DBG(" -> early_setup()\n");
 
-       /*
-        * Fill the default DBG level (do we want to keep
-        * that old mechanism around forever?)
-        */
-       ppcdbg_initialize();
-
        /*
         * Do early initializations using the flattened device
         * tree, like retrieving the physical memory map or
@@ -384,21 +273,49 @@ void __init early_setup(unsigned long dt_ptr)
        DBG("Found, Initializing memory management...\n");
 
        /*
-        * Initialize stab / SLB management
+        * Initialize the MMU Hash table and create the linear mapping
+        * of memory. Has to be done before stab/slb initialization as
+        * this is currently where the page size encoding is obtained
         */
-       if (!firmware_has_feature(FW_FEATURE_ISERIES))
-               stab_initialize(lpaca->stab_real);
+       htab_initialize();
 
        /*
-        * Initialize the MMU Hash table and create the linear mapping
-        * of memory
+        * Initialize stab / SLB management except on iSeries
         */
-       htab_initialize();
+       if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
+               if (cpu_has_feature(CPU_FTR_SLB))
+                       slb_initialize();
+               else
+                       stab_initialize(lpaca->stab_real);
+       }
 
        DBG(" <- early_setup()\n");
 }
 
 
+#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+void smp_release_cpus(void)
+{
+       extern unsigned long __secondary_hold_spinloop;
+
+       DBG(" -> smp_release_cpus()\n");
+
+       /* All secondary cpus are spinning on a common spinloop, release them
+        * all now so they can start to spin on their individual paca
+        * spinloops. For non SMP kernels, the secondary cpus never get out
+        * of the common spinloop.
+        * This is useless but harmless on iSeries, secondaries are already
+        * waiting on their paca spinloops. */
+
+       __secondary_hold_spinloop = 1;
+       mb();
+
+       DBG(" <- smp_release_cpus()\n");
+}
+#else
+#define smp_release_cpus()
+#endif /* CONFIG_SMP || CONFIG_KEXEC */
+
 /*
  * Initialize some remaining members of the ppc64_caches and systemcfg structures
  * (at least until we get rid of them completely). This is mostly some
@@ -480,43 +397,6 @@ static void __init initialize_cache_info(void)
        DBG(" <- initialize_cache_info()\n");
 }
 
-static void __init check_for_initrd(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-       u64 *prop;
-
-       DBG(" -> check_for_initrd()\n");
-
-       if (of_chosen) {
-               prop = (u64 *)get_property(of_chosen,
-                               "linux,initrd-start", NULL);
-               if (prop != NULL) {
-                       initrd_start = (unsigned long)__va(*prop);
-                       prop = (u64 *)get_property(of_chosen,
-                                       "linux,initrd-end", NULL);
-                       if (prop != NULL) {
-                               initrd_end = (unsigned long)__va(*prop);
-                               initrd_below_start_ok = 1;
-                       } else
-                               initrd_start = 0;
-               }
-       }
-
-       /* If we were passed an initrd, set the ROOT_DEV properly if the values
-        * look sensible. If not, clear initrd reference.
-        */
-       if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
-           initrd_end > initrd_start)
-               ROOT_DEV = Root_RAM0;
-       else
-               initrd_start = initrd_end = 0;
-
-       if (initrd_start)
-               printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
-
-       DBG(" <- check_for_initrd()\n");
-#endif /* CONFIG_BLK_DEV_INITRD */
-}
 
 /*
  * Do some initial setup of the system.  The parameters are those which 
@@ -588,23 +468,18 @@ void __init setup_system(void)
 
        parse_early_param();
 
-#ifdef CONFIG_SMP
-       /*
-        * iSeries has already initialized the cpu maps at this point.
-        */
-       setup_cpu_maps();
+       check_smt_enabled();
+       smp_setup_cpu_maps();
 
        /* Release secondary cpus out of their spinloops at 0x60 now that
         * we can map physical -> logical CPU ids
         */
        smp_release_cpus();
-#endif
 
        printk("Starting Linux PPC64 %s\n", system_utsname.version);
 
        printk("-----------------------------------------------------\n");
        printk("ppc64_pft_size                = 0x%lx\n", ppc64_pft_size);
-       printk("ppc64_debug_switch            = 0x%lx\n", ppc64_debug_switch);
        printk("ppc64_interrupt_controller    = 0x%ld\n", ppc64_interrupt_controller);
        printk("systemcfg                     = 0x%p\n", systemcfg);
        printk("systemcfg->platform           = 0x%x\n", systemcfg->platform);
@@ -630,32 +505,6 @@ static int ppc64_panic_event(struct notifier_block *this,
        return NOTIFY_DONE;
 }
 
-/*
- * These three variables are used to save values passed to us by prom_init()
- * via the device tree. The TCE variables are needed because with a memory_limit
- * in force we may need to explicitly map the TCE are at the top of RAM.
- */
-unsigned long memory_limit;
-unsigned long tce_alloc_start;
-unsigned long tce_alloc_end;
-
-#ifdef CONFIG_PPC_ISERIES
-/*
- * On iSeries we just parse the mem=X option from the command line.
- * On pSeries it's a bit more complicated, see prom_init_mem()
- */
-static int __init early_parsemem(char *p)
-{
-       if (!p)
-               return 0;
-
-       memory_limit = ALIGN(memparse(p, &p), PAGE_SIZE);
-
-       return 0;
-}
-early_param("mem", early_parsemem);
-#endif /* CONFIG_PPC_ISERIES */
-
 #ifdef CONFIG_IRQSTACKS
 static void __init irqstack_early_init(void)
 {
@@ -666,10 +515,12 @@ static void __init irqstack_early_init(void)
         * SLB misses on them.
         */
        for_each_cpu(i) {
-               softirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
-                                       THREAD_SIZE, 0x10000000));
-               hardirq_ctx[i] = (struct thread_info *)__va(lmb_alloc_base(THREAD_SIZE,
-                                       THREAD_SIZE, 0x10000000));
+               softirq_ctx[i] = (struct thread_info *)
+                       __va(lmb_alloc_base(THREAD_SIZE,
+                                           THREAD_SIZE, 0x10000000));
+               hardirq_ctx[i] = (struct thread_info *)
+                       __va(lmb_alloc_base(THREAD_SIZE,
+                                           THREAD_SIZE, 0x10000000));
        }
 }
 #else
@@ -697,8 +548,8 @@ static void __init emergency_stack_init(void)
        limit = min(0x10000000UL, lmb.rmo_size);
 
        for_each_cpu(i)
-               paca[i].emergency_sp = __va(lmb_alloc_base(PAGE_SIZE, 128,
-                                               limit)) + PAGE_SIZE;
+               paca[i].emergency_sp =
+               __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
 }
 
 /*
@@ -1009,26 +860,6 @@ int check_legacy_ioport(unsigned long base_port)
 }
 EXPORT_SYMBOL(check_legacy_ioport);
 
-#ifdef CONFIG_XMON
-static int __init early_xmon(char *p)
-{
-       /* ensure xmon is enabled */
-       if (p) {
-               if (strncmp(p, "on", 2) == 0)
-                       xmon_init(1);
-               if (strncmp(p, "off", 3) == 0)
-                       xmon_init(0);
-               if (strncmp(p, "early", 5) != 0)
-                       return 0;
-       }
-       xmon_init(1);
-       debugger(NULL);
-
-       return 0;
-}
-early_param("xmon", early_xmon);
-#endif
-
 void cpu_die(void)
 {
        if (ppc_md.cpu_die)