Merge commit 'v2.6.26-rc9' into x86/cpu
author Ingo Molnar <mingo@elte.hu>
Tue, 8 Jul 2008 05:47:47 +0000 (07:47 +0200)
committer Ingo Molnar <mingo@elte.hu>
Tue, 8 Jul 2008 05:47:47 +0000 (07:47 +0200)
25 files changed:
arch/x86/kernel/Makefile
arch/x86/kernel/apic_32.c
arch/x86/kernel/apic_64.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/amd_64.c [new file with mode: 0644]
arch/x86/kernel/cpu/bugs_64.c [moved from arch/x86/kernel/bugs_64.c with 100% similarity]
arch/x86/kernel/cpu/centaur_64.c [new file with mode: 0644]
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/intel_64.c [new file with mode: 0644]
arch/x86/kernel/mmconf-fam10h_64.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/setup_64.c
arch/x86/kernel/vmlinux_64.lds.S
arch/x86/pci/Makefile_32
arch/x86/pci/Makefile_64
arch/x86/pci/amd_bus.c [moved from arch/x86/pci/k8-bus_64.c with 95% similarity]
arch/x86/pci/direct.c
arch/x86/pci/pci.h
include/asm-x86/apic.h
include/asm-x86/mmconfig.h [new file with mode: 0644]
include/asm-x86/msr-index.h
kernel/time/tick-broadcast.c

index 77807d4769c99c455237a810fe0edd0f996fb31f..3a2a54c950ffab775424fc554d813ed664c83393 100644 (file)
@@ -25,7 +25,6 @@ obj-$(CONFIG_X86_64)  += syscall_64.o vsyscall_64.o setup64.o
 obj-y                  += bootflag.o e820_$(BITS).o
 obj-y                  += pci-dma.o quirks.o i8237.o topology.o kdebugfs.o
 obj-y                  += alternative.o i8253.o pci-nommu.o
-obj-$(CONFIG_X86_64)   += bugs_64.o
 obj-y                  += tsc_$(BITS).o io_delay.o rtc.o
 
 obj-$(CONFIG_X86_TRAMPOLINE)   += trampoline.o
index 4b99b1bdeb6cbb090e5868138922b259da7d98fe..c44206e731d4345c60e9dc06a46dd23002fd23ce 100644 (file)
@@ -64,9 +64,8 @@ static int enable_local_apic __initdata;
 
 /* Local APIC timer verification ok */
 static int local_apic_timer_verify_ok;
-/* Disable local APIC timer from the kernel commandline or via dmi quirk
-   or using CPU MSR check */
-int local_apic_timer_disabled;
+/* Disable local APIC timer from the kernel commandline or via dmi quirk */
+static int local_apic_timer_disabled;
 /* Local APIC timer works in C2 */
 int local_apic_timer_c2_ok;
 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
index 0633cfd0dc291a1310f0632e1012cf663b5e89a9..a5cc8447cf4dde3cde1ea58bbb3620e828833e26 100644 (file)
@@ -43,7 +43,7 @@
 #include <mach_ipi.h>
 #include <mach_apic.h>
 
-int disable_apic_timer __cpuinitdata;
+static int disable_apic_timer __cpuinitdata;
 static int apic_calibrate_pmtmr __initdata;
 int disable_apic;
 
@@ -422,32 +422,8 @@ void __init setup_boot_APIC_clock(void)
        setup_APIC_timer();
 }
 
-/*
- * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
- * C1E flag only in the secondary CPU, so when we detect the wreckage
- * we already have enabled the boot CPU local apic timer. Check, if
- * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
- * set the DUMMY flag again and force the broadcast mode in the
- * clockevents layer.
- */
-static void __cpuinit check_boot_apic_timer_broadcast(void)
-{
-       if (!disable_apic_timer ||
-           (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
-               return;
-
-       printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
-       lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
-
-       local_irq_enable();
-       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
-                          &boot_cpu_physical_apicid);
-       local_irq_disable();
-}
-
 void __cpuinit setup_secondary_APIC_clock(void)
 {
-       check_boot_apic_timer_broadcast();
        setup_APIC_timer();
 }
 
index a0c6f8190887f4220b4b76e531e57ad471fc1acb..65b1be5fe9ce16932ac617a246d5ee1eaa79ac09 100644 (file)
@@ -6,11 +6,15 @@ obj-y                 := intel_cacheinfo.o addon_cpuid_features.o
 obj-y                  += proc.o feature_names.o
 
 obj-$(CONFIG_X86_32)   += common.o bugs.o
+obj-$(CONFIG_X86_64)   += bugs_64.o
 obj-$(CONFIG_X86_32)   += amd.o
+obj-$(CONFIG_X86_64)   += amd_64.o
 obj-$(CONFIG_X86_32)   += cyrix.o
 obj-$(CONFIG_X86_32)   += centaur.o
+obj-$(CONFIG_X86_64)   += centaur_64.o
 obj-$(CONFIG_X86_32)   += transmeta.o
 obj-$(CONFIG_X86_32)   += intel.o
+obj-$(CONFIG_X86_64)   += intel_64.o
 obj-$(CONFIG_X86_32)   += umc.o
 
 obj-$(CONFIG_X86_MCE)  += mcheck/
index 245866828294c6be3fcf369d0ea76a0a3eed59cb..81a07ca65d4487d7f3133619210d287238ac411c 100644 (file)
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
-#ifdef CONFIG_X86_LOCAL_APIC
-#define ENABLE_C1E_MASK         0x18000000
-#define CPUID_PROCESSOR_SIGNATURE       1
-#define CPUID_XFAM              0x0ff00000
-#define CPUID_XFAM_K8           0x00000000
-#define CPUID_XFAM_10H          0x00100000
-#define CPUID_XFAM_11H          0x00200000
-#define CPUID_XMOD              0x000f0000
-#define CPUID_XMOD_REV_F        0x00040000
-
-/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
-static __cpuinit int amd_apic_timer_broken(void)
-{
-       u32 lo, hi;
-       u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
-       switch (eax & CPUID_XFAM) {
-       case CPUID_XFAM_K8:
-               if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
-                       break;
-       case CPUID_XFAM_10H:
-       case CPUID_XFAM_11H:
-               rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
-               if (lo & ENABLE_C1E_MASK) {
-                       if (smp_processor_id() != boot_cpu_physical_apicid)
-                               printk(KERN_INFO "AMD C1E detected late. "
-                                      "        Force timer broadcast.\n");
-                       return 1;
-               }
-               break;
-       default:
-               /* err on the side of caution */
-               return 1;
-       }
-       return 0;
-}
-#endif
-
 int force_mwait __cpuinitdata;
 
 static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
@@ -297,11 +260,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                        num_cache_leaves = 3;
        }
 
-#ifdef CONFIG_X86_LOCAL_APIC
-       if (amd_apic_timer_broken())
-               local_apic_timer_disabled = 1;
-#endif
-
        /* K6s reports MCEs but don't actually have all the MSRs */
        if (c->x86 < 6)
                clear_cpu_cap(c, X86_FEATURE_MCE);
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
new file mode 100644 (file)
index 0000000..30b7557
--- /dev/null
@@ -0,0 +1,211 @@
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/numa_64.h>
+#include <asm/mmconfig.h>
+#include <asm/cacheflush.h>
+
+#include <mach_apic.h>
+
+#include "cpu.h"
+
+int force_mwait __cpuinitdata;
+
+#ifdef CONFIG_NUMA
+static int __cpuinit nearby_node(int apicid)
+{
+       int i, node;
+
+       for (i = apicid - 1; i >= 0; i--) {
+               node = apicid_to_node[i];
+               if (node != NUMA_NO_NODE && node_online(node))
+                       return node;
+       }
+       for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
+               node = apicid_to_node[i];
+               if (node != NUMA_NO_NODE && node_online(node))
+                       return node;
+       }
+       return first_node(node_online_map); /* Shouldn't happen */
+}
+#endif
+
+/*
+ * On an AMD dual core setup the lower bits of the APIC ID distinguish the cores.
+ * Assumes the number of cores is a power of two.
+ */
+static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+       unsigned bits;
+#ifdef CONFIG_NUMA
+       int cpu = smp_processor_id();
+       int node = 0;
+       unsigned apicid = hard_smp_processor_id();
+#endif
+       bits = c->x86_coreid_bits;
+
+       /* Low order bits define the core id (index of core in socket) */
+       c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
+       /* Convert the initial APIC ID into the socket ID */
+       c->phys_proc_id = c->initial_apicid >> bits;
+
+#ifdef CONFIG_NUMA
+       node = c->phys_proc_id;
+       if (apicid_to_node[apicid] != NUMA_NO_NODE)
+               node = apicid_to_node[apicid];
+       if (!node_online(node)) {
+               /* Two possibilities here:
+                  - The CPU is missing memory and no node was created.
+                  In that case try picking one from a nearby CPU
+                  - The APIC IDs differ from the HyperTransport node IDs
+                  which the K8 northbridge parsing fills in.
+                  Assume they are all increased by a constant offset,
+                  but in the same order as the HT nodeids.
+                  If that doesn't result in a usable node fall back to the
+                  path for the previous case.  */
+
+               int ht_nodeid = c->initial_apicid;
+
+               if (ht_nodeid >= 0 &&
+                   apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+                       node = apicid_to_node[ht_nodeid];
+               /* Pick a nearby node */
+               if (!node_online(node))
+                       node = nearby_node(apicid);
+       }
+       numa_set_node(cpu, node);
+
+       printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+#endif
+#endif
+}
+
+static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+       unsigned bits, ecx;
+
+       /* Multi core CPU? */
+       if (c->extended_cpuid_level < 0x80000008)
+               return;
+
+       ecx = cpuid_ecx(0x80000008);
+
+       c->x86_max_cores = (ecx & 0xff) + 1;
+
+       /* CPU telling us the core id bits shift? */
+       bits = (ecx >> 12) & 0xF;
+
+       /* Otherwise recompute */
+       if (bits == 0) {
+               while ((1 << bits) < c->x86_max_cores)
+                       bits++;
+       }
+
+       c->x86_coreid_bits = bits;
+
+#endif
+}
+
+static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+{
+       early_init_amd_mc(c);
+
+       /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
+       if (c->x86_power & (1<<8))
+               set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+}
+
+static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+{
+       unsigned level;
+
+#ifdef CONFIG_SMP
+       unsigned long value;
+
+       /*
+        * Disable TLB flush filter by setting HWCR.FFDIS on K8
+        * bit 6 of msr C001_0015
+        *
+        * Errata 63 for SH-B3 steppings
+        * Errata 122 for all steppings (F+ have it disabled by default)
+        */
+       if (c->x86 == 15) {
+               rdmsrl(MSR_K8_HWCR, value);
+               value |= 1 << 6;
+               wrmsrl(MSR_K8_HWCR, value);
+       }
+#endif
+
+       /* Bit 31 in normal CPUID is used for nonstandard 3DNow ID;
+          3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
+       clear_cpu_cap(c, 0*32+31);
+
+       /* On C+ stepping K8 rep microcode works well for copy/memset */
+       level = cpuid_eax(1);
+       if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
+                            level >= 0x0f58))
+               set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+       if (c->x86 == 0x10 || c->x86 == 0x11)
+               set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+       /* Enable workaround for FXSAVE leak */
+       if (c->x86 >= 6)
+               set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
+
+       level = get_model_name(c);
+       if (!level) {
+               switch (c->x86) {
+               case 15:
+                       /* Should distinguish models here, but this is only
+                          a fallback anyway. */
+                       strcpy(c->x86_model_id, "Hammer");
+                       break;
+               }
+       }
+       display_cacheinfo(c);
+
+       /* Multi core CPU? */
+       if (c->extended_cpuid_level >= 0x80000008)
+               amd_detect_cmp(c);
+
+       if (c->extended_cpuid_level >= 0x80000006 &&
+               (cpuid_edx(0x80000006) & 0xf000))
+               num_cache_leaves = 4;
+       else
+               num_cache_leaves = 3;
+
+       if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
+               set_cpu_cap(c, X86_FEATURE_K8);
+
+       /* MFENCE stops RDTSC speculation */
+       set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+
+       if (c->x86 == 0x10)
+               fam10h_check_enable_mmcfg();
+
+       if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
+               unsigned long long tseg;
+
+               /*
+                * Split up direct mapping around the TSEG SMM area.
+                * Don't do it for gbpages because there seems to be very
+                * little benefit in doing so.
+                */
+               if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) &&
+                   (tseg >> PMD_SHIFT) <
+                       (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT)))
+                       set_memory_4k((unsigned long)__va(tseg), 1);
+       }
+}
+
+static struct cpu_dev amd_cpu_dev __cpuinitdata = {
+       .c_vendor       = "AMD",
+       .c_ident        = { "AuthenticAMD" },
+       .c_early_init   = early_init_amd,
+       .c_init         = init_amd,
+};
+
+cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
+
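
As a worked illustration of the core-topology math that early_init_amd_mc() and amd_detect_cmp() above perform, here is a minimal stand-alone sketch; the CPUID 0x80000008 ECX value and the APIC ID are made-up sample inputs, not from real hardware:

#include <stdio.h>

/* Sketch of the AMD core-id math from amd_64.c above; all inputs are
 * hypothetical sample values. */
int main(void)
{
	unsigned ecx = 0x3003;   /* sample CPUID 0x80000008 ECX: NC = 3, core-id size = 3 */
	unsigned apicid = 0x0b;  /* sample initial APIC ID */
	unsigned max_cores = (ecx & 0xff) + 1;   /* -> 4 cores */
	unsigned bits = (ecx >> 12) & 0xf;       /* -> 3 core-id bits */

	if (bits == 0)           /* CPU doesn't report the shift: recompute */
		while ((1u << bits) < max_cores)
			bits++;

	printf("cpu_core_id  = %u\n", apicid & ((1 << bits) - 1)); /* 0x0b & 7  -> 3 */
	printf("phys_proc_id = %u\n", apicid >> bits);             /* 0x0b >> 3 -> 1 */
	return 0;
}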
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
new file mode 100644 (file)
index 0000000..13526fd
--- /dev/null
@@ -0,0 +1,43 @@
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+
+#include "cpu.h"
+
+static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+{
+       if (c->x86 == 0x6 && c->x86_model >= 0xf)
+               set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+}
+
+static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+{
+       /* Cache sizes */
+       unsigned n;
+
+       n = c->extended_cpuid_level;
+       if (n >= 0x80000008) {
+               unsigned eax = cpuid_eax(0x80000008);
+               c->x86_virt_bits = (eax >> 8) & 0xff;
+               c->x86_phys_bits = eax & 0xff;
+       }
+
+       if (c->x86 == 0x6 && c->x86_model >= 0xf) {
+               c->x86_cache_alignment = c->x86_clflush_size * 2;
+               set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+               set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+       }
+       set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+}
+
+static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+       .c_vendor       = "Centaur",
+       .c_ident        = { "CentaurHauls" },
+       .c_early_init   = early_init_centaur,
+       .c_init         = init_centaur,
+};
+
+cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
+
index 783691b2a7381648bbc03ccf98842b0fcad6df22..4d894e8565feb6ad22b103f9c87ae51641ca0eda 100644 (file)
@@ -1,3 +1,6 @@
+#ifndef ARCH_X86_CPU_H
+
+#define ARCH_X86_CPU_H
 
 struct cpu_model_info {
        int vendor;
@@ -36,3 +39,5 @@ extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
 
 extern int get_model_name(struct cpuinfo_x86 *c);
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
+
+#endif
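
The cpu_vendor_dev_register() macro used by the new vendor files is not part of this diff; judging from the __x86cpuvendor_start/__x86cpuvendor_end markers above, the .x86cpuvendor.init output section added to vmlinux_64.lds.S below, and the way early_cpu_init() walks cvdev->vendor and cvdev->cpu_dev, it plausibly amounts to something like the following sketch (the macro body is an assumption, not the verbatim kernel definition):

struct cpu_vendor_dev {
	int vendor;
	struct cpu_dev *cpu_dev;
};

/* Drop a registration record into the .x86cpuvendor.init section, where
 * early_cpu_init() picks it up between the two linker-provided markers. */
#define cpu_vendor_dev_register(vid, cdev)                              \
	static struct cpu_vendor_dev __cpu_vendor_dev_##vid             \
	__attribute__((__used__, __section__(".x86cpuvendor.init"))) =  \
	{ vid, cdev }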
diff --git a/arch/x86/kernel/cpu/intel_64.c b/arch/x86/kernel/cpu/intel_64.c
new file mode 100644 (file)
index 0000000..fcb1cc9
--- /dev/null
@@ -0,0 +1,103 @@
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/topology.h>
+#include <asm/numa_64.h>
+
+#include "cpu.h"
+
+static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+{
+       if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
+           (c->x86 == 0x6 && c->x86_model >= 0x0e))
+               set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+}
+
+/*
+ * find out the number of processor cores on the die
+ */
+static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
+{
+       unsigned int eax, t;
+
+       if (c->cpuid_level < 4)
+               return 1;
+
+       cpuid_count(4, 0, &eax, &t, &t, &t);
+
+       if (eax & 0x1f)
+               return ((eax >> 26) + 1);
+       else
+               return 1;
+}
+
+static void __cpuinit srat_detect_node(void)
+{
+#ifdef CONFIG_NUMA
+       unsigned node;
+       int cpu = smp_processor_id();
+       int apicid = hard_smp_processor_id();
+
+       /* Don't do the funky fallback heuristics the AMD version employs
+          for now. */
+       node = apicid_to_node[apicid];
+       if (node == NUMA_NO_NODE || !node_online(node))
+               node = first_node(node_online_map);
+       numa_set_node(cpu, node);
+
+       printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
+#endif
+}
+
+static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+{
+       /* Cache sizes */
+       unsigned n;
+
+       init_intel_cacheinfo(c);
+       if (c->cpuid_level > 9) {
+               unsigned eax = cpuid_eax(10);
+               /* Check for version and the number of counters */
+               if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
+                       set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
+       }
+
+       if (cpu_has_ds) {
+               unsigned int l1, l2;
+               rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
+               if (!(l1 & (1<<11)))
+                       set_cpu_cap(c, X86_FEATURE_BTS);
+               if (!(l1 & (1<<12)))
+                       set_cpu_cap(c, X86_FEATURE_PEBS);
+       }
+
+
+       if (cpu_has_bts)
+               ds_init_intel(c);
+
+       n = c->extended_cpuid_level;
+       if (n >= 0x80000008) {
+               unsigned eax = cpuid_eax(0x80000008);
+               c->x86_virt_bits = (eax >> 8) & 0xff;
+               c->x86_phys_bits = eax & 0xff;
+       }
+
+       if (c->x86 == 15)
+               c->x86_cache_alignment = c->x86_clflush_size * 2;
+       if (c->x86 == 6)
+               set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+       set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+       c->x86_max_cores = intel_num_cpu_cores(c);
+
+       srat_detect_node();
+}
+
+static struct cpu_dev intel_cpu_dev __cpuinitdata = {
+       .c_vendor       = "Intel",
+       .c_ident        = { "GenuineIntel" },
+       .c_early_init   = early_init_intel,
+       .c_init         = init_intel,
+};
+cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
+
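
A short worked decode of the CPUID leaf 4 arithmetic in intel_num_cpu_cores() above, with a hypothetical raw EAX value:

#include <stdio.h>

/* Decode of CPUID(4, 0) EAX as used by intel_num_cpu_cores(); the raw
 * value is a made-up example. */
int main(void)
{
	unsigned eax = 0x04000121;  /* sample: cache type 1 in bits 4:0, 1 in bits 31:26 */

	if (eax & 0x1f)             /* bits 4:0 == 0 means the leaf is invalid */
		printf("cores on die: %u\n", (eax >> 26) + 1);  /* -> 2 */
	else
		printf("cores on die: 1\n");
	return 0;
}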
index edc5fbfe85c06eb1e2eff8aebdbcf3740b074a63..fdfdc550b366043045a34a4c476c61d6ff67b5d7 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/io.h>
 #include <asm/msr.h>
 #include <asm/acpi.h>
+#include <asm/mmconfig.h>
 
 #include "../pci/pci.h"
 
index ba370dc8685bf8f9bf31e9100f78c4e800ea755e..4061d63aabe74bb12f32f5d162c564240ae8d8a5 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/pm.h>
+#include <linux/clockchips.h>
 
 struct kmem_cache *task_xstate_cachep;
 
@@ -45,6 +46,76 @@ void arch_task_cache_init(void)
                                  SLAB_PANIC, NULL);
 }
 
+/*
+ * Idle related variables and functions
+ */
+unsigned long boot_option_idle_override = 0;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+/*
+ * Power management idle function, if any..
+ */
+void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
+
+#ifdef CONFIG_X86_32
+/*
+ * This halt magic was a workaround for ancient floppy DMA
+ * wreckage. It should be safe to remove.
+ */
+static int hlt_counter;
+void disable_hlt(void)
+{
+       hlt_counter++;
+}
+EXPORT_SYMBOL(disable_hlt);
+
+void enable_hlt(void)
+{
+       hlt_counter--;
+}
+EXPORT_SYMBOL(enable_hlt);
+
+static inline int hlt_use_halt(void)
+{
+       return (!hlt_counter && boot_cpu_data.hlt_works_ok);
+}
+#else
+static inline int hlt_use_halt(void)
+{
+       return 1;
+}
+#endif
+
+/*
+ * We use this if we don't have any better
+ * idle routine..
+ */
+void default_idle(void)
+{
+       if (hlt_use_halt()) {
+               current_thread_info()->status &= ~TS_POLLING;
+               /*
+                * TS_POLLING-cleared state must be visible before we
+                * test NEED_RESCHED:
+                */
+               smp_mb();
+
+               if (!need_resched())
+                       safe_halt();    /* enables interrupts racelessly */
+               else
+                       local_irq_enable();
+               current_thread_info()->status |= TS_POLLING;
+       } else {
+               local_irq_enable();
+               /* loop is done by the caller */
+               cpu_relax();
+       }
+}
+#ifdef CONFIG_APM_MODULE
+EXPORT_SYMBOL(default_idle);
+#endif
+
 static void do_nothing(void *unused)
 {
 }
@@ -122,44 +193,129 @@ static void poll_idle(void)
  *
  * idle=mwait overrides this decision and forces the usage of mwait.
  */
+
+#define MWAIT_INFO                     0x05
+#define MWAIT_ECX_EXTENDED_INFO                0x01
+#define MWAIT_EDX_C1                   0xf0
+
 static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
 {
+       u32 eax, ebx, ecx, edx;
+
        if (force_mwait)
                return 1;
 
-       if (c->x86_vendor == X86_VENDOR_AMD) {
-               switch(c->x86) {
-               case 0x10:
-               case 0x11:
-                       return 0;
-               }
-       }
+       if (c->cpuid_level < MWAIT_INFO)
+               return 0;
+
+       cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
+       /* Check whether ECX says EDX has extended info about MWAIT */
+       if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
+               return 1;
+
+       /*
+        * EDX enumerates MONITOR/MWAIT extensions. Check whether
+        * C1 supports MWAIT.
+        */
+       return (edx & MWAIT_EDX_C1);
+}
+
+/*
+ * Check for AMD CPUs, which potentially have C1E support
+ */
+static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
+{
+       if (c->x86_vendor != X86_VENDOR_AMD)
+               return 0;
+
+       if (c->x86 < 0x0F)
+               return 0;
+
+       /* Family 0x0f models < rev F do not have C1E */
+       if (c->x86 == 0x0f && c->x86_model < 0x40)
+               return 0;
+
        return 1;
 }
 
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+/*
+ * C1E aware idle routine. We check for C1E active in the interrupt
+ * pending message MSR. If we detect C1E, then we handle it the same
+ * way as C3 power states (local apic timer and TSC stop)
+ */
+static void c1e_idle(void)
 {
-       static int selected;
+       static cpumask_t c1e_mask = CPU_MASK_NONE;
+       static int c1e_detected;
 
-       if (selected)
+       if (need_resched())
                return;
+
+       if (!c1e_detected) {
+               u32 lo, hi;
+
+               rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
+               if (lo & K8_INTP_C1E_ACTIVE_MASK) {
+                       c1e_detected = 1;
+                       mark_tsc_unstable("TSC halt in C1E");
+                       printk(KERN_INFO "System has C1E enabled\n");
+               }
+       }
+
+       if (c1e_detected) {
+               int cpu = smp_processor_id();
+
+               if (!cpu_isset(cpu, c1e_mask)) {
+                       cpu_set(cpu, c1e_mask);
+                       /*
+                        * Force broadcast so ACPI cannot interfere. Needs
+                        * to run with interrupts enabled as it uses
+                        * smp_call_function.
+                        */
+                       local_irq_enable();
+                       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
+                                          &cpu);
+                       printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
+                              cpu);
+                       local_irq_disable();
+               }
+               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+
+               default_idle();
+
+               /*
+                * The switch back from broadcast mode needs to be
+                * called with interrupts disabled.
+                */
+               local_irq_disable();
+               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+               local_irq_enable();
+       } else
+               default_idle();
+}
+
+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+{
 #ifdef CONFIG_X86_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
        }
 #endif
+       if (pm_idle)
+               return;
+
        if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
                /*
-                * Skip, if setup has overridden idle.
                 * One CPU supports mwait => All CPUs support mwait
                 */
-               if (!pm_idle) {
-                       printk(KERN_INFO "using mwait in idle threads.\n");
-                       pm_idle = mwait_idle;
-               }
-       }
-       selected = 1;
+               printk(KERN_INFO "using mwait in idle threads.\n");
+               pm_idle = mwait_idle;
+       } else if (check_c1e_idle(c)) {
+               printk(KERN_INFO "using C1E aware idle routine\n");
+               pm_idle = c1e_idle;
+       } else
+               pm_idle = default_idle;
 }
 
 static int __init idle_setup(char *str)
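
To make the new mwait_usable() check concrete, here is a stand-alone decode of CPUID leaf 5 using the MWAIT_* constants defined in the hunk above; the ECX/EDX register values are hypothetical samples:

#include <stdio.h>

#define MWAIT_INFO              0x05
#define MWAIT_ECX_EXTENDED_INFO 0x01
#define MWAIT_EDX_C1            0xf0

int main(void)
{
	unsigned ecx = 0x03;        /* sample: extended info available */
	unsigned edx = 0x00000220;  /* sample: 2 C1 sub-states, 2 C2 sub-states */

	if (!(ecx & MWAIT_ECX_EXTENDED_INFO)) {
		printf("no extended info, assume MWAIT usable\n");
		return 0;
	}
	/* each EDX nibble counts MWAIT sub-states for one C-state; 7:4 is C1 */
	printf("C1 MWAIT sub-states: %u -> %susable\n",
	       (edx & MWAIT_EDX_C1) >> 4,
	       (edx & MWAIT_EDX_C1) ? "" : "not ");
	return 0;
}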
index e2db9ac5c61c2b6d6de1f0b13f71b6d834eaeca2..c2a11d77b1b5664391b96577d67c698fec5534d3 100644 (file)
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 
-static int hlt_counter;
-
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
@@ -77,55 +72,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
        return ((unsigned long *)tsk->thread.sp)[3];
 }
 
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
-void disable_hlt(void)
-{
-       hlt_counter++;
-}
-
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-       hlt_counter--;
-}
-
-EXPORT_SYMBOL(enable_hlt);
-
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-       if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
-               current_thread_info()->status &= ~TS_POLLING;
-               /*
-                * TS_POLLING-cleared state must be visible before we
-                * test NEED_RESCHED:
-                */
-               smp_mb();
-
-               if (!need_resched())
-                       safe_halt();    /* enables interrupts racelessly */
-               else
-                       local_irq_enable();
-               current_thread_info()->status |= TS_POLLING;
-       } else {
-               local_irq_enable();
-               /* loop is done by the caller */
-               cpu_relax();
-       }
-}
-#ifdef CONFIG_APM_MODULE
-EXPORT_SYMBOL(default_idle);
-#endif
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <asm/nmi.h>
 /* We don't actually take CPU down, just spin without interrupts. */
@@ -168,24 +114,19 @@ void cpu_idle(void)
        while (1) {
                tick_nohz_stop_sched_tick();
                while (!need_resched()) {
-                       void (*idle)(void);
 
                        check_pgt_cache();
                        rmb();
-                       idle = pm_idle;
 
                        if (rcu_pending(cpu))
                                rcu_check_callbacks(cpu, 0);
 
-                       if (!idle)
-                               idle = default_idle;
-
                        if (cpu_is_offline(cpu))
                                play_dead();
 
                        local_irq_disable();
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
-                       idle();
+                       pm_idle();
                }
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
index c6eb5c91e5f607f7fb70758ed65eee667c7d1401..290183e9731a2c45e25d0855533d8a7bea69e74c 100644 (file)
@@ -56,15 +56,6 @@ asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
@@ -94,25 +85,6 @@ void exit_idle(void)
        __exit_idle();
 }
 
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-       current_thread_info()->status &= ~TS_POLLING;
-       /*
-        * TS_POLLING-cleared state must be visible before we
-        * test NEED_RESCHED:
-        */
-       smp_mb();
-       if (!need_resched())
-               safe_halt();    /* enables interrupts racelessly */
-       else
-               local_irq_enable();
-       current_thread_info()->status |= TS_POLLING;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
@@ -150,12 +122,9 @@ void cpu_idle(void)
        while (1) {
                tick_nohz_stop_sched_tick();
                while (!need_resched()) {
-                       void (*idle)(void);
 
                        rmb();
-                       idle = pm_idle;
-                       if (!idle)
-                               idle = default_idle;
+
                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        /*
@@ -165,7 +134,7 @@ void cpu_idle(void)
                         */
                        local_irq_disable();
                        enter_idle();
-                       idle();
+                       pm_idle();
                        /* In many cases the interrupt that ended idle
                           has already called exit_idle. But some idle
                           loops can be woken up without interrupt. */
index 6dff1286ad8adec4b9fc6bb7808c3a233c476fd2..b789ec5999233a608f43851d201fb62754287fd1 100644 (file)
@@ -71,6 +71,7 @@
 #include <asm/topology.h>
 #include <asm/trampoline.h>
 #include <asm/pat.h>
+#include <asm/mmconfig.h>
 
 #include <mach_apic.h>
 #ifdef CONFIG_PARAVIRT
@@ -79,6 +80,8 @@
 #define ARCH_SETUP
 #endif
 
+#include "cpu/cpu.h"
+
 /*
  * Machine setup..
  */
@@ -95,8 +98,6 @@ int bootloader_type;
 
 unsigned long saved_video_mode;
 
-int force_mwait __cpuinitdata;
-
 /*
  * Early DMI memory
  */
@@ -164,6 +165,7 @@ static struct resource bss_resource = {
        .flags = IORESOURCE_RAM,
 };
 
+static void __init early_cpu_init(void);
 static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);
 
 #ifdef CONFIG_PROC_VMCORE
@@ -293,18 +295,6 @@ static void __init parse_setup_data(void)
        }
 }
 
-#ifdef CONFIG_PCI_MMCONFIG
-extern void __cpuinit fam10h_check_enable_mmcfg(void);
-extern void __init check_enable_amd_mmconf_dmi(void);
-#else
-void __cpuinit fam10h_check_enable_mmcfg(void)
-{
-}
-void __init check_enable_amd_mmconf_dmi(void)
-{
-}
-#endif
-
 /*
  * setup_arch - architecture-specific boot-time initializations
  *
@@ -352,6 +342,7 @@ void __init setup_arch(char **cmdline_p)
        bss_resource.start = virt_to_phys(&__bss_start);
        bss_resource.end = virt_to_phys(&__bss_stop)-1;
 
+       early_cpu_init();
        early_identify_cpu(&boot_cpu_data);
 
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
@@ -537,7 +528,20 @@ void __init setup_arch(char **cmdline_p)
        check_enable_amd_mmconf_dmi();
 }
 
-static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
+struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
+{
+       display_cacheinfo(c);
+}
+
+static struct cpu_dev __cpuinitdata default_cpu = {
+       .c_init = default_init,
+       .c_vendor = "Unknown",
+};
+static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+
+int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
        unsigned int *v;
 
@@ -553,7 +557,7 @@ static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 }
 
 
-static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
+void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
        unsigned int n, dummy, eax, ebx, ecx, edx;
 
@@ -585,228 +589,6 @@ static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
        }
 }
 
-#ifdef CONFIG_NUMA
-static int __cpuinit nearby_node(int apicid)
-{
-       int i, node;
-
-       for (i = apicid - 1; i >= 0; i--) {
-               node = apicid_to_node[i];
-               if (node != NUMA_NO_NODE && node_online(node))
-                       return node;
-       }
-       for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-               node = apicid_to_node[i];
-               if (node != NUMA_NO_NODE && node_online(node))
-                       return node;
-       }
-       return first_node(node_online_map); /* Shouldn't happen */
-}
-#endif
-
-/*
- * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
- * Assumes number of cores is a power of two.
- */
-static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-       unsigned bits;
-#ifdef CONFIG_NUMA
-       int cpu = smp_processor_id();
-       int node = 0;
-       unsigned apicid = hard_smp_processor_id();
-#endif
-       bits = c->x86_coreid_bits;
-
-       /* Low order bits define the core id (index of core in socket) */
-       c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
-       /* Convert the initial APIC ID into the socket ID */
-       c->phys_proc_id = c->initial_apicid >> bits;
-
-#ifdef CONFIG_NUMA
-       node = c->phys_proc_id;
-       if (apicid_to_node[apicid] != NUMA_NO_NODE)
-               node = apicid_to_node[apicid];
-       if (!node_online(node)) {
-               /* Two possibilities here:
-                  - The CPU is missing memory and no node was created.
-                  In that case try picking one from a nearby CPU
-                  - The APIC IDs differ from the HyperTransport node IDs
-                  which the K8 northbridge parsing fills in.
-                  Assume they are all increased by a constant offset,
-                  but in the same order as the HT nodeids.
-                  If that doesn't result in a usable node fall back to the
-                  path for the previous case.  */
-
-               int ht_nodeid = c->initial_apicid;
-
-               if (ht_nodeid >= 0 &&
-                   apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-                       node = apicid_to_node[ht_nodeid];
-               /* Pick a nearby node */
-               if (!node_online(node))
-                       node = nearby_node(apicid);
-       }
-       numa_set_node(cpu, node);
-
-       printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-#endif
-#endif
-}
-
-static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_SMP
-       unsigned bits, ecx;
-
-       /* Multi core CPU? */
-       if (c->extended_cpuid_level < 0x80000008)
-               return;
-
-       ecx = cpuid_ecx(0x80000008);
-
-       c->x86_max_cores = (ecx & 0xff) + 1;
-
-       /* CPU telling us the core id bits shift? */
-       bits = (ecx >> 12) & 0xF;
-
-       /* Otherwise recompute */
-       if (bits == 0) {
-               while ((1 << bits) < c->x86_max_cores)
-                       bits++;
-       }
-
-       c->x86_coreid_bits = bits;
-
-#endif
-}
-
-#define ENABLE_C1E_MASK                0x18000000
-#define CPUID_PROCESSOR_SIGNATURE      1
-#define CPUID_XFAM             0x0ff00000
-#define CPUID_XFAM_K8          0x00000000
-#define CPUID_XFAM_10H         0x00100000
-#define CPUID_XFAM_11H         0x00200000
-#define CPUID_XMOD             0x000f0000
-#define CPUID_XMOD_REV_F       0x00040000
-
-/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
-static __cpuinit int amd_apic_timer_broken(void)
-{
-       u32 lo, hi, eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
-
-       switch (eax & CPUID_XFAM) {
-       case CPUID_XFAM_K8:
-               if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
-                       break;
-       case CPUID_XFAM_10H:
-       case CPUID_XFAM_11H:
-               rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
-               if (lo & ENABLE_C1E_MASK)
-                       return 1;
-               break;
-       default:
-               /* err on the side of caution */
-               return 1;
-       }
-       return 0;
-}
-
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
-{
-       early_init_amd_mc(c);
-
-       /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-       if (c->x86_power & (1<<8))
-               set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-}
-
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
-{
-       unsigned level;
-
-#ifdef CONFIG_SMP
-       unsigned long value;
-
-       /*
-        * Disable TLB flush filter by setting HWCR.FFDIS on K8
-        * bit 6 of msr C001_0015
-        *
-        * Errata 63 for SH-B3 steppings
-        * Errata 122 for all steppings (F+ have it disabled by default)
-        */
-       if (c->x86 == 15) {
-               rdmsrl(MSR_K8_HWCR, value);
-               value |= 1 << 6;
-               wrmsrl(MSR_K8_HWCR, value);
-       }
-#endif
-
-       /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
-          3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
-       clear_cpu_cap(c, 0*32+31);
-
-       /* On C+ stepping K8 rep microcode works well for copy/memset */
-       level = cpuid_eax(1);
-       if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) ||
-                            level >= 0x0f58))
-               set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-       if (c->x86 == 0x10 || c->x86 == 0x11)
-               set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-
-       /* Enable workaround for FXSAVE leak */
-       if (c->x86 >= 6)
-               set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);
-
-       level = get_model_name(c);
-       if (!level) {
-               switch (c->x86) {
-               case 15:
-                       /* Should distinguish Models here, but this is only
-                          a fallback anyways. */
-                       strcpy(c->x86_model_id, "Hammer");
-                       break;
-               }
-       }
-       display_cacheinfo(c);
-
-       /* Multi core CPU? */
-       if (c->extended_cpuid_level >= 0x80000008)
-               amd_detect_cmp(c);
-
-       if (c->extended_cpuid_level >= 0x80000006 &&
-               (cpuid_edx(0x80000006) & 0xf000))
-               num_cache_leaves = 4;
-       else
-               num_cache_leaves = 3;
-
-       if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
-               set_cpu_cap(c, X86_FEATURE_K8);
-
-       /* MFENCE stops RDTSC speculation */
-       set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
-
-       if (c->x86 == 0x10)
-               fam10h_check_enable_mmcfg();
-
-       if (amd_apic_timer_broken())
-               disable_apic_timer = 1;
-
-       if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
-               unsigned long long tseg;
-
-               /*
-                * Split up direct mapping around the TSEG SMM area.
-                * Don't do it for gbpages because there seems very little
-                * benefit in doing so.
-                */
-               if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg) &&
-               (tseg >> PMD_SHIFT) < (max_pfn_mapped >> (PMD_SHIFT-PAGE_SHIFT)))
-                       set_memory_4k((unsigned long)__va(tseg), 1);
-       }
-}
-
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
@@ -857,135 +639,59 @@ out:
 #endif
 }
 
-/*
- * find out the number of processor cores on the die
- */
-static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
-{
-       unsigned int eax, t;
-
-       if (c->cpuid_level < 4)
-               return 1;
-
-       cpuid_count(4, 0, &eax, &t, &t, &t);
-
-       if (eax & 0x1f)
-               return ((eax >> 26) + 1);
-       else
-               return 1;
-}
-
-static void __cpuinit srat_detect_node(void)
-{
-#ifdef CONFIG_NUMA
-       unsigned node;
-       int cpu = smp_processor_id();
-       int apicid = hard_smp_processor_id();
-
-       /* Don't do the funky fallback heuristics the AMD version employs
-          for now. */
-       node = apicid_to_node[apicid];
-       if (node == NUMA_NO_NODE || !node_online(node))
-               node = first_node(node_online_map);
-       numa_set_node(cpu, node);
-
-       printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
-#endif
-}
-
-static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
-{
-       if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
-           (c->x86 == 0x6 && c->x86_model >= 0x0e))
-               set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-}
-
-static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
-       /* Cache sizes */
-       unsigned n;
-
-       init_intel_cacheinfo(c);
-       if (c->cpuid_level > 9) {
-               unsigned eax = cpuid_eax(10);
-               /* Check for version and the number of counters */
-               if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
-                       set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
+       char *v = c->x86_vendor_id;
+       int i;
+       static int printed;
+
+       for (i = 0; i < X86_VENDOR_NUM; i++) {
+               if (cpu_devs[i]) {
+                       if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+                           (cpu_devs[i]->c_ident[1] &&
+                           !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+                               c->x86_vendor = i;
+                               this_cpu = cpu_devs[i];
+                               return;
+                       }
+               }
        }
-
-       if (cpu_has_ds) {
-               unsigned int l1, l2;
-               rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
-               if (!(l1 & (1<<11)))
-                       set_cpu_cap(c, X86_FEATURE_BTS);
-               if (!(l1 & (1<<12)))
-                       set_cpu_cap(c, X86_FEATURE_PEBS);
+       if (!printed) {
+               printed++;
+               printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
+               printk(KERN_ERR "CPU: Your system may be unstable.\n");
        }
-
-
-       if (cpu_has_bts)
-               ds_init_intel(c);
-
-       n = c->extended_cpuid_level;
-       if (n >= 0x80000008) {
-               unsigned eax = cpuid_eax(0x80000008);
-               c->x86_virt_bits = (eax >> 8) & 0xff;
-               c->x86_phys_bits = eax & 0xff;
-               /* CPUID workaround for Intel 0F34 CPU */
-               if (c->x86_vendor == X86_VENDOR_INTEL &&
-                   c->x86 == 0xF && c->x86_model == 0x3 &&
-                   c->x86_mask == 0x4)
-                       c->x86_phys_bits = 36;
-       }
-
-       if (c->x86 == 15)
-               c->x86_cache_alignment = c->x86_clflush_size * 2;
-       if (c->x86 == 6)
-               set_cpu_cap(c, X86_FEATURE_REP_GOOD);
-       set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
-       c->x86_max_cores = intel_num_cpu_cores(c);
-
-       srat_detect_node();
-}
-
-static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
-{
-       if (c->x86 == 0x6 && c->x86_model >= 0xf)
-               set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+       c->x86_vendor = X86_VENDOR_UNKNOWN;
 }
 
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+static void __init early_cpu_support_print(void)
 {
-       /* Cache sizes */
-       unsigned n;
+       int i, j;
+       struct cpu_dev *cpu_devx;
 
-       n = c->extended_cpuid_level;
-       if (n >= 0x80000008) {
-               unsigned eax = cpuid_eax(0x80000008);
-               c->x86_virt_bits = (eax >> 8) & 0xff;
-               c->x86_phys_bits = eax & 0xff;
-       }
-
-       if (c->x86 == 0x6 && c->x86_model >= 0xf) {
-               c->x86_cache_alignment = c->x86_clflush_size * 2;
-               set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
-               set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+       printk("KERNEL supported cpus:\n");
+       for (i = 0; i < X86_VENDOR_NUM; i++) {
+               cpu_devx = cpu_devs[i];
+               if (!cpu_devx)
+                       continue;
+               for (j = 0; j < 2; j++) {
+                       if (!cpu_devx->c_ident[j])
+                               continue;
+                       printk("  %s %s\n", cpu_devx->c_vendor,
+                               cpu_devx->c_ident[j]);
+               }
        }
-       set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 }
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void __init early_cpu_init(void)
 {
-       char *v = c->x86_vendor_id;
+        struct cpu_vendor_dev *cvdev;
 
-       if (!strcmp(v, "AuthenticAMD"))
-               c->x86_vendor = X86_VENDOR_AMD;
-       else if (!strcmp(v, "GenuineIntel"))
-               c->x86_vendor = X86_VENDOR_INTEL;
-       else if (!strcmp(v, "CentaurHauls"))
-               c->x86_vendor = X86_VENDOR_CENTAUR;
-       else
-               c->x86_vendor = X86_VENDOR_UNKNOWN;
+       for (cvdev = __x86cpuvendor_start; cvdev < __x86cpuvendor_end; cvdev++)
+               cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
+       early_cpu_support_print();
 }
 
 /* Do some early cpuid on the boot CPU to get some parameters that are
@@ -1066,17 +772,9 @@ static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);
 
-       switch (c->x86_vendor) {
-       case X86_VENDOR_AMD:
-               early_init_amd(c);
-               break;
-       case X86_VENDOR_INTEL:
-               early_init_intel(c);
-               break;
-       case X86_VENDOR_CENTAUR:
-               early_init_centaur(c);
-               break;
-       }
+       if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
+           cpu_devs[c->x86_vendor]->c_early_init)
+               cpu_devs[c->x86_vendor]->c_early_init(c);
 
        validate_pat_support(c);
 }
@@ -1104,24 +802,8 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
-       switch (c->x86_vendor) {
-       case X86_VENDOR_AMD:
-               init_amd(c);
-               break;
-
-       case X86_VENDOR_INTEL:
-               init_intel(c);
-               break;
-
-       case X86_VENDOR_CENTAUR:
-               init_centaur(c);
-               break;
-
-       case X86_VENDOR_UNKNOWN:
-       default:
-               display_cacheinfo(c);
-               break;
-       }
+       if (this_cpu->c_init)
+               this_cpu->c_init(c);
 
        detect_ht(c);
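
With this rework, adding support for another vendor no longer means editing setup_64.c: a new vendor file just fills in a struct cpu_dev and registers it. A hypothetical sketch following the pattern of amd_64.c/intel_64.c/centaur_64.c (the vendor id, strings, and callbacks below are invented for illustration):

#include "cpu.h"

static void __cpuinit early_init_example(struct cpuinfo_x86 *c)
{
	/* early boot-time setup for the hypothetical vendor */
}

static void __cpuinit init_example(struct cpuinfo_x86 *c)
{
	display_cacheinfo(c);   /* declared in cpu.h above */
}

static struct cpu_dev example_cpu_dev __cpuinitdata = {
	.c_vendor       = "Example",
	.c_ident        = { "GenuineExample" },
	.c_early_init   = early_init_example,
	.c_init         = init_example,
};

/* X86_VENDOR_EXAMPLE is a placeholder; a real port would use an existing
 * or newly added X86_VENDOR_* slot. */
cpu_vendor_dev_register(X86_VENDOR_EXAMPLE, &example_cpu_dev);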
 
index fad3674b06a58f990cabab44bd61910f975a6682..b29f63bdff5ea7675f08c42341fec8aada90b9e8 100644 (file)
@@ -177,6 +177,7 @@ SECTIONS
        *(.con_initcall.init)
   }
   __con_initcall_end = .;
+  . = ALIGN(16);
   __x86cpuvendor_start = .;
   .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
        *(.x86cpuvendor.init)
index 89ec35d00efde0ebde8488f84bc491cffe8fb092..f647e7e56da4c69e653e344059cff04abe191af0 100644 (file)
@@ -22,3 +22,4 @@ pci-$(CONFIG_X86_NUMAQ)               := numa.o irq.o
 pci-$(CONFIG_NUMA)             += mp_bus_to_node.o
 
 obj-y                          += $(pci-y) common.o early.o
+obj-y                          += amd_bus.o
index 8fbd19832cf6795f34be5e61d0c9c9c942ee6b74..fd47068c95de65ef7786e9a05998a71de91fe228 100644 (file)
@@ -13,5 +13,5 @@ obj-y                 += legacy.o irq.o common.o early.o
 # mmconfig has a 64bit special
 obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_64.o direct.o mmconfig-shared.o
 
-obj-y          += k8-bus_64.o
+obj-y          += amd_bus.o
 
similarity index 95%
rename from arch/x86/pci/k8-bus_64.c
rename to arch/x86/pci/amd_bus.c
index 5c2799c20e47b7a48b05e1fd15cbdbc79229d86b..15f505d3a78ef99b3591d1ca7fb24d0d2c278688 100644 (file)
@@ -1,5 +1,9 @@
 #include <linux/init.h>
 #include <linux/pci.h>
+#include "pci.h"
+
+#ifdef CONFIG_X86_64
+
 #include <asm/pci-direct.h>
 #include <asm/mpspec.h>
 #include <linux/cpumask.h>
@@ -526,3 +530,31 @@ static int __init early_fill_mp_bus_info(void)
 }
 
 postcore_initcall(early_fill_mp_bus_info);
+
+#endif
+
+/* common 32/64 bit code */
+
+#define ENABLE_CF8_EXT_CFG      (1ULL << 46)
+
+static void enable_pci_io_ecs_per_cpu(void *unused)
+{
+       u64 reg;
+       rdmsrl(MSR_AMD64_NB_CFG, reg);
+       if (!(reg & ENABLE_CF8_EXT_CFG)) {
+               reg |= ENABLE_CF8_EXT_CFG;
+               wrmsrl(MSR_AMD64_NB_CFG, reg);
+       }
+}
+
+static int __init enable_pci_io_ecs(void)
+{
+       /* assume all cpus from fam10h have IO ECS */
+       if (boot_cpu_data.x86 < 0x10)
+               return 0;
+       on_each_cpu(enable_pci_io_ecs_per_cpu, NULL, 1, 1);
+       pci_probe |= PCI_HAS_IO_ECS;
+       return 0;
+}
+
+postcore_initcall(enable_pci_io_ecs);
index 21d1e0e0d535bfef58228d695b983c747dcea805..9915293500fb69ebdae0b8d80b69155f180f1274 100644 (file)
@@ -8,18 +8,21 @@
 #include "pci.h"
 
 /*
- * Functions for accessing PCI configuration space with type 1 accesses
+ * Functions for accessing PCI base (first 256 bytes) and extended
+ * (4096 bytes per PCI function) configuration space with type 1
+ * accesses.
  */
 
 #define PCI_CONF1_ADDRESS(bus, devfn, reg) \
-       (0x80000000 | (bus << 16) | (devfn << 8) | (reg & ~3))
+       (0x80000000 | ((reg & 0xF00) << 16) | (bus << 16) \
+       | (devfn << 8) | (reg & 0xFC))
 
 static int pci_conf1_read(unsigned int seg, unsigned int bus,
                          unsigned int devfn, int reg, int len, u32 *value)
 {
        unsigned long flags;
 
-       if ((bus > 255) || (devfn > 255) || (reg > 255)) {
+       if ((bus > 255) || (devfn > 255) || (reg > 4095)) {
                *value = -1;
                return -EINVAL;
        }
@@ -50,7 +53,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
 {
        unsigned long flags;
 
-       if ((bus > 255) || (devfn > 255) || (reg > 255)) 
+       if ((bus > 255) || (devfn > 255) || (reg > 4095))
                return -EINVAL;
 
        spin_lock_irqsave(&pci_config_lock, flags);
@@ -260,10 +263,18 @@ void __init pci_direct_init(int type)
                return;
        printk(KERN_INFO "PCI: Using configuration type %d for base access\n",
                 type);
-       if (type == 1)
+       if (type == 1) {
                raw_pci_ops = &pci_direct_conf1;
-       else
-               raw_pci_ops = &pci_direct_conf2;
+               if (raw_pci_ext_ops)
+                       return;
+               if (!(pci_probe & PCI_HAS_IO_ECS))
+                       return;
+               printk(KERN_INFO "PCI: Using configuration type 1 "
+                      "for extended access\n");
+               raw_pci_ext_ops = &pci_direct_conf1;
+               return;
+       }
+       raw_pci_ops = &pci_direct_conf2;
 }
 
 int __init pci_direct_probe(void)
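
A worked example of the extended PCI_CONF1_ADDRESS() encoding above: register bits 11:8 are placed into CF8 address bits 27:24, which AMD fam10h parts decode once IO ECS has been switched on via ENABLE_CF8_EXT_CFG in amd_bus.c. The bus/devfn/reg values are arbitrary samples:

#include <stdio.h>

#define PCI_CONF1_ADDRESS(bus, devfn, reg) \
	(0x80000000 | ((reg & 0xF00) << 16) | (bus << 16) \
	| (devfn << 8) | (reg & 0xFC))

int main(void)
{
	unsigned bus = 0, devfn = 0x08, reg = 0x134;  /* extended register 0x134 */

	printf("CF8 = 0x%08x\n", PCI_CONF1_ADDRESS(bus, devfn, reg));
	/* prints: CF8 = 0x81000834 (reg[11:8] = 1 in bits 27:24, offset 0x34) */
	return 0;
}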
index 720c4c55453462d78770d03506c26ae45d6d2edd..ba263e626a68762bf4e4c470fe325fa01c4f45ee 100644 (file)
@@ -27,6 +27,7 @@
 #define PCI_CAN_SKIP_ISA_ALIGN 0x8000
 #define PCI_USE__CRS           0x10000
 #define PCI_CHECK_ENABLE_AMD_MMCONF    0x20000
+#define PCI_HAS_IO_ECS         0x40000
 
 extern unsigned int pci_probe;
 extern unsigned long pirq_table_addr;
index be9639a9a186244fef74318f901172415a7e9343..3c387cda95fa2188051b8669b4ff3ea2825838a2 100644 (file)
@@ -38,12 +38,9 @@ extern void generic_apic_probe(void);
 extern int apic_verbosity;
 extern int timer_over_8254;
 extern int local_apic_timer_c2_ok;
-extern int local_apic_timer_disabled;
 
-extern int apic_runs_main_timer;
 extern int ioapic_force;
 extern int disable_apic;
-extern int disable_apic_timer;
 
 /*
  * Basic functions accessing APICs.
diff --git a/include/asm-x86/mmconfig.h b/include/asm-x86/mmconfig.h
new file mode 100644 (file)
index 0000000..95beda0
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _ASM_MMCONFIG_H
+#define _ASM_MMCONFIG_H
+
+#ifdef CONFIG_PCI_MMCONFIG
+extern void __cpuinit fam10h_check_enable_mmcfg(void);
+extern void __init check_enable_amd_mmconf_dmi(void);
+#else
+static inline void fam10h_check_enable_mmcfg(void) { }
+static inline void check_enable_amd_mmconf_dmi(void) { }
+#endif
+
+#endif
index 09413ad39d3c8db4aeb42edcedd1a38d6e3779a6..44bce773012e80b11190b7400852f35a55683c98 100644 (file)
 #define MSR_K8_TOP_MEM2                        0xc001001d
 #define MSR_K8_SYSCFG                  0xc0010010
 #define MSR_K8_HWCR                    0xc0010015
-#define MSR_K8_ENABLE_C1E              0xc0010055
+#define MSR_K8_INT_PENDING_MSG         0xc0010055
+/* C1E active bits in int pending message */
+#define K8_INTP_C1E_ACTIVE_MASK                0x18000000
 #define MSR_K8_TSEG_ADDR               0xc0010112
 #define K8_MTRRFIXRANGE_DRAM_ENABLE    0x00040000 /* MtrrFixDramEn bit    */
 #define K8_MTRRFIXRANGE_DRAM_MODIFY    0x00080000 /* MtrrFixDramModEn bit */
index 57a1f02e5ec07fc20a6715d40d7fe41855449727..67f80c2617096454a1d0b1f520825034f7c87dfc 100644 (file)
@@ -30,6 +30,7 @@
 struct tick_device tick_broadcast_device;
 static cpumask_t tick_broadcast_mask;
 static DEFINE_SPINLOCK(tick_broadcast_lock);
+static int tick_broadcast_force;
 
 #ifdef CONFIG_TICK_ONESHOT
 static void tick_broadcast_clear_oneshot(int cpu);
@@ -232,10 +233,11 @@ static void tick_do_broadcast_on_off(void *why)
                                                     CLOCK_EVT_MODE_SHUTDOWN);
                }
                if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
-                       dev->features |= CLOCK_EVT_FEAT_DUMMY;
+                       tick_broadcast_force = 1;
                break;
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-               if (cpu_isset(cpu, tick_broadcast_mask)) {
+               if (!tick_broadcast_force &&
+                   cpu_isset(cpu, tick_broadcast_mask)) {
                        cpu_clear(cpu, tick_broadcast_mask);
                        if (td->mode == TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);