[PATCH] smp_processor_id() cleanup
author Ingo Molnar <mingo@elte.hu>
Wed, 22 Jun 2005 00:14:34 +0000 (17:14 -0700)
committer Linus Torvalds <torvalds@ppc970.osdl.org>
Wed, 22 Jun 2005 01:46:13 +0000 (18:46 -0700)
This patch implements a number of smp_processor_id() cleanup ideas that
Arjan van de Ven and I came up with.

The previous __smp_processor_id()/_smp_processor_id()/smp_processor_id() API
spaghetti was hard to follow both on the implementation side and on the
usage side.

Some of the complexity arose from picking wrong names, some of the
complexity comes from the fact that not all architectures defined
__smp_processor_id.

In the new code, there are two externally visible symbols:

 - smp_processor_id(): debug variant.

 - raw_smp_processor_id(): nondebug variant. Replaces all existing
   uses of _smp_processor_id() and __smp_processor_id(). Defined
   by every SMP architecture in include/asm-*/smp.h.

There is one new internal symbol, dependent on DEBUG_PREEMPT:

 - debug_smp_processor_id(): internal debug variant; smp_processor_id()
                             maps to it when CONFIG_DEBUG_PREEMPT is enabled.
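
In condensed form, the new wiring in include/linux/smp.h (a sketch distilled
from the full diff below, not an additional interface) looks like this:

    #ifdef CONFIG_DEBUG_PREEMPT
      extern unsigned int debug_smp_processor_id(void);
    # define smp_processor_id() debug_smp_processor_id()
    #else
    # define smp_processor_id() raw_smp_processor_id()
    #endif

On !SMP builds, raw_smp_processor_id() is simply defined to 0.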

Also, I moved debug_smp_processor_id() from lib/kernel_lock.c into a new
lib/smp_processor_id.c file.  All related comments got updated and/or
clarified.
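
For call sites the rule of thumb is unchanged: use smp_processor_id() with
preemption disabled (or from a thread bound to a single CPU), and use
raw_smp_processor_id() only where a momentarily stale CPU number is harmless,
such as rough per-CPU statistics. A minimal sketch (the per-CPU counter and
the two helper functions are hypothetical, made up purely for illustration):

    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(unsigned long, example_events); /* hypothetical */

    /*
     * Safe: preemption is off between get_cpu() and put_cpu(), so the CPU
     * number cannot change under us and the DEBUG_PREEMPT check stays quiet.
     */
    static void example_count_event(void)
    {
            int cpu = get_cpu();    /* preempt_disable() + smp_processor_id() */

            per_cpu(example_events, cpu)++;
            put_cpu();              /* preempt_enable() */
    }

    /*
     * Tolerable: a rough statistics bump where getting migrated mid-update
     * merely skews a counter, so the nondebug variant avoids a false positive.
     */
    static void example_bump_stat(void)
    {
            per_cpu(example_events, raw_smp_processor_id())++;
    }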

I have build/boot tested the following 8 .config combinations on x86:

 {SMP,UP} x {PREEMPT,!PREEMPT} x {DEBUG_PREEMPT,!DEBUG_PREEMPT}

I have also build/boot tested x64 on UP/PREEMPT/DEBUG_PREEMPT.  (Other
architectures are untested, but should work just fine.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
37 files changed:
arch/i386/kernel/traps.c
arch/i386/lib/delay.c
arch/ppc/lib/locks.c
arch/ppc64/kernel/idle.c
arch/sh/lib/delay.c
arch/sparc64/lib/delay.c
arch/x86_64/lib/delay.c
drivers/acpi/processor_idle.c
drivers/input/gameport/gameport.c
drivers/oprofile/buffer_sync.c
fs/xfs/linux-2.6/xfs_linux.h
include/asm-alpha/smp.h
include/asm-arm/smp.h
include/asm-i386/smp.h
include/asm-ia64/smp.h
include/asm-m32r/smp.h
include/asm-mips/smp.h
include/asm-parisc/smp.h
include/asm-ppc/smp.h
include/asm-ppc64/smp.h
include/asm-s390/smp.h
include/asm-sh/smp.h
include/asm-sparc/smp.h
include/asm-sparc64/smp.h
include/asm-um/smp.h
include/asm-x86_64/smp.h
include/linux/mmzone.h
include/linux/smp.h
include/net/route.h
include/net/snmp.h
kernel/module.c
kernel/power/smp.c
kernel/sched.c
kernel/stop_machine.c
lib/Makefile
lib/kernel_lock.c
lib/smp_processor_id.c [new file with mode: 0644]

diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 00c63419c06f80f0af9ce17f4b7f78e18dd25df2..83c579e82a8137f24a6952efc461b4e434eb6158 100644
@@ -306,7 +306,7 @@ void die(const char * str, struct pt_regs * regs, long err)
        };
        static int die_counter;
 
-       if (die.lock_owner != _smp_processor_id()) {
+       if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                spin_lock_irq(&die.lock);
                die.lock_owner = smp_processor_id();
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
index 080639f262b1364479476e5f88065080388c2fa2..eb0cdfe9280fe11414239a640f9557279cfa2b23 100644
@@ -34,7 +34,7 @@ inline void __const_udelay(unsigned long xloops)
        xloops *= 4;
        __asm__("mull %0"
                :"=d" (xloops), "=&a" (d0)
-               :"1" (xloops),"0" (cpu_data[_smp_processor_id()].loops_per_jiffy * (HZ/4)));
+               :"1" (xloops),"0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
         __delay(++xloops);
 }
 
diff --git a/arch/ppc/lib/locks.c b/arch/ppc/lib/locks.c
index 694163d696d886244b6ea769ab8b19abed8b65a0..c450dc4b766e23b5588dccf0f57e42ea6e1ea22d 100644
@@ -130,7 +130,7 @@ void _raw_read_lock(rwlock_t *rw)
                while (!read_can_lock(rw)) {
                        if (--stuck == 0) {
                                printk("_read_lock(%p) CPU#%d lock %d\n",
-                                      rw, _smp_processor_id(), rw->lock);
+                                      rw, raw_smp_processor_id(), rw->lock);
                                stuck = INIT_STUCK;
                        }
                }
@@ -158,7 +158,7 @@ void _raw_write_lock(rwlock_t *rw)
                while (!write_can_lock(rw)) {
                        if (--stuck == 0) {
                                printk("write_lock(%p) CPU#%d lock %d)\n",
-                                      rw, _smp_processor_id(), rw->lock);
+                                      rw, raw_smp_processor_id(), rw->lock);
                                stuck = INIT_STUCK;
                        }
                }
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index f24ce2b872004ae4feb1f419eddb3c0866c1c440..ff8a7db142d3ecce21bdcd54d4c5602e9e89faa2 100644
@@ -292,7 +292,7 @@ static int native_idle(void)
                if (need_resched())
                        schedule();
 
-               if (cpu_is_offline(_smp_processor_id()) &&
+               if (cpu_is_offline(raw_smp_processor_id()) &&
                    system_state == SYSTEM_RUNNING)
                        cpu_die();
        }
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index 50b36037d86b01e51c0f4c1107c2e43588a62974..351714694d6d4b9b6f81d183e774f1fc47e0e215 100644
@@ -24,7 +24,7 @@ inline void __const_udelay(unsigned long xloops)
        __asm__("dmulu.l        %0, %2\n\t"
                "sts    mach, %0"
                : "=r" (xloops)
-               : "0" (xloops), "r" (cpu_data[_smp_processor_id()].loops_per_jiffy)
+               : "0" (xloops), "r" (cpu_data[raw_smp_processor_id()].loops_per_jiffy)
                : "macl", "mach");
        __delay(xloops * HZ);
 }
diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c
index f6b4c784d53e05111fbda459f465c04d20c8cc43..e8808727617a146aea231267d4fafe7beb8e8132 100644
@@ -31,7 +31,7 @@ void __const_udelay(unsigned long n)
 {
        n *= 4;
 
-       n *= (cpu_data(_smp_processor_id()).udelay_val * (HZ/4));
+       n *= (cpu_data(raw_smp_processor_id()).udelay_val * (HZ/4));
        n >>= 32;
 
        __delay(n + 1);
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 6e2d66472eb1938f50aa45c2e0306b124f7885b1..aed61a668a1bebbf5b4661cd5fad774e03cca895 100644
@@ -34,7 +34,7 @@ void __delay(unsigned long loops)
 
 inline void __const_udelay(unsigned long xloops)
 {
-       __delay(((xloops * cpu_data[_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
+       __delay(((xloops * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
 }
 
 void __udelay(unsigned long usecs)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ff64d333e95f58c68ac45f83832e41060ecb9d1c..c9d671cf7857dbc7101e99d469fa24eed711ac60 100644
@@ -171,7 +171,7 @@ static void acpi_processor_idle (void)
        int                     sleep_ticks = 0;
        u32                     t1, t2 = 0;
 
-       pr = processors[_smp_processor_id()];
+       pr = processors[raw_smp_processor_id()];
        if (!pr)
                return;
 
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 9b8ff396e6f82d58740e151729cd8cdb706973b7..e152d0fa0cdd25d4d4ac4de364eaf80b9502a0cc 100644
@@ -134,7 +134,7 @@ static int gameport_measure_speed(struct gameport *gameport)
        }
 
        gameport_close(gameport);
-       return (cpu_data[_smp_processor_id()].loops_per_jiffy * (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
+       return (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
 
 #else
 
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 55720dc6ec43feb06b3b1edecb8aca6716cea578..745a14183634feb56d83e51685b10d549eb4e846 100644
@@ -62,7 +62,7 @@ static int task_exit_notify(struct notifier_block * self, unsigned long val, voi
        /* To avoid latency problems, we only process the current CPU,
         * hoping that most samples for the task are on this CPU
         */
-       sync_buffer(_smp_processor_id());
+       sync_buffer(raw_smp_processor_id());
        return 0;
 }
 
@@ -86,7 +86,7 @@ static int munmap_notify(struct notifier_block * self, unsigned long val, void *
                /* To avoid latency problems, we only process the current CPU,
                 * hoping that most samples for the task are on this CPU
                 */
-               sync_buffer(_smp_processor_id());
+               sync_buffer(raw_smp_processor_id());
                return 0;
        }
 
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 71bb41019a12c16c9bf2ee4e879842e9d21f7320..7d7c8788ea75192aaf619b21cfdf15c21e8ce786 100644
@@ -145,10 +145,10 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
 #define xfs_inherit_nosymlinks xfs_params.inherit_nosym.val
 #define xfs_rotorstep          xfs_params.rotorstep.val
 
-#ifndef __smp_processor_id
-#define __smp_processor_id()   smp_processor_id()
+#ifndef raw_smp_processor_id
+#define raw_smp_processor_id() smp_processor_id()
 #endif
-#define current_cpu()          __smp_processor_id()
+#define current_cpu()          raw_smp_processor_id()
 #define current_pid()          (current->pid)
 #define current_fsuid(cred)    (current->fsuid)
 #define current_fsgid(cred)    (current->fsgid)
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index cbc173ae45aa2a711fee8cf9c913aac5c91c2e84..9950706abdf8289f0d2815dd45102aa787016038 100644
@@ -43,7 +43,7 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 #define PROC_CHANGE_PENALTY     20
 
 #define hard_smp_processor_id()        __hard_smp_processor_id()
-#define smp_processor_id()     (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern cpumask_t cpu_present_mask;
 extern cpumask_t cpu_online_map;
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index bd44f894690f55d74c8679ad2168c4e73dbd030c..6c6c60adbbaa0552aabe8433625a2153b45e5d0e 100644
@@ -21,7 +21,7 @@
 # error "<asm-arm/smp.h> included in non-SMP build"
 #endif
 
-#define smp_processor_id()     (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern cpumask_t cpu_present_mask;
 #define cpu_possible_map cpu_present_mask
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index e03a206dfa36fbde0d15ac5a65f8eea1e7fa7503..55ef31f66bbec2c7771a7571658dad9451dd4823 100644
@@ -51,7 +51,7 @@ extern u8 x86_cpu_to_apicid[];
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define __smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_callin_map;
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 3ba1a061e4aed516b6eef4452af84b1c5f444db6..a3914352c995e83222080a9d7151a9567256196c 100644
@@ -46,7 +46,7 @@ ia64_get_lid (void)
 #define SMP_IRQ_REDIRECTION    (1 << 0)
 #define SMP_IPI_REDIRECTION    (1 << 1)
 
-#define smp_processor_id()     (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern struct smp_boot_data {
        int cpu_count;
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index 8cd4d0da4be19ec366dccef697a3dc6bc5f15c16..b9a20cdad65f4a7c4208e6407b1bc2fdd7218f0d 100644
@@ -66,7 +66,7 @@ extern volatile int cpu_2_physid[NR_CPUS];
 #define physid_to_cpu(physid)  physid_2_cpu[physid]
 #define cpu_to_physid(cpu_id)  cpu_2_physid[cpu_id]
 
-#define smp_processor_id()     (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern cpumask_t cpu_callout_map;
 #define cpu_possible_map cpu_callout_map
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 8ba370ecfd4cc26b9138da3bf2fce8ca759fa4fa..5618f1e12f404d6d6d93671a471c69ef46619530 100644
@@ -21,7 +21,7 @@
 #include <linux/cpumask.h>
 #include <asm/atomic.h>
 
-#define smp_processor_id()     (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 /* Map from cpu id to sequential logical cpu number.  This will only
    not be idempotent when cpus failed to come on-line.  */
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index fde77ac35463b1610c7c085d57b3ca9bd55f0fc6..9413f67a540bb24ae2b2d2866a48c3fc8dc1a37a 100644
@@ -51,7 +51,7 @@ extern void smp_send_reschedule(int cpu);
 
 extern unsigned long cpu_present_mask;
 
-#define smp_processor_id()     (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 #endif /* CONFIG_SMP */
 
diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h
index ebfb614f55f650707ec092e05a15187d6d0855b4..17530c232c7690c6a47a0b87a0e50ea4e23b8788 100644
@@ -44,7 +44,7 @@ extern void smp_message_recv(int, struct pt_regs *);
 #define NO_PROC_ID             0xFF            /* No processor magic marker */
 #define PROC_CHANGE_PENALTY    20
 
-#define smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern int __cpu_up(unsigned int cpu);
 
diff --git a/include/asm-ppc64/smp.h b/include/asm-ppc64/smp.h
index c8646fa999c2ff61217ff01b2cb702784a5ed1df..8115ecb8feee24abd99a25b3659fb58d98315ce0 100644
@@ -45,7 +45,7 @@ void generic_cpu_die(unsigned int cpu);
 void generic_mach_cpu_die(void);
 #endif
 
-#define __smp_processor_id() (get_paca()->paca_index)
+#define raw_smp_processor_id() (get_paca()->paca_index)
 #define hard_smp_processor_id() (get_paca()->hw_cpu_id)
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 9473786387a349123cafb4cd43221bf884ccd089..dd50e57a928f2a6fa37e29b5cca503075221971c 100644
@@ -47,7 +47,7 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
  
 #define PROC_CHANGE_PENALTY    20              /* Schedule penalty */
 
-#define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
+#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
 
 extern int smp_get_cpu(cpumask_t cpu_map);
 extern void smp_put_cpu(int cpu);
diff --git a/include/asm-sh/smp.h b/include/asm-sh/smp.h
index 38b54469d7d1a1557715f2196f703094a6c93cb1..f19a8b3b69a6a24af8519ab3828469ed510eb5d3 100644
@@ -25,7 +25,7 @@ extern cpumask_t cpu_possible_map;
 
 #define cpu_online(cpu)                cpu_isset(cpu, cpu_online_map)
 
-#define smp_processor_id()     (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 /* I've no idea what the real meaning of this is */
 #define PROC_CHANGE_PENALTY    20
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index f986c0d0922a1c87215fb48456a41132245616a0..4f96d8333a12ad362a70b3c86d95937564cd43c5 100644
@@ -148,7 +148,7 @@ extern __inline__ int hard_smp_processor_id(void)
 }
 #endif
 
-#define smp_processor_id()     (current_thread_info()->cpu)
+#define raw_smp_processor_id()         (current_thread_info()->cpu)
 
 #define prof_multiplier(__cpu)         cpu_data(__cpu).multiplier
 #define prof_counter(__cpu)            cpu_data(__cpu).counter
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 5e3e06d908feb9b19f33b0b18816d1da8e823710..110a2de891239407ff048772b920fa531119a7eb 100644
@@ -64,7 +64,7 @@ static __inline__ int hard_smp_processor_id(void)
        }
 }
 
-#define smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 #endif /* !(__ASSEMBLY__) */
 
diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h
index 4412d5d9c26ba7376a738ba419f2b544b88486fe..d879eba2b52c7d74e646a3213911304f877279c5 100644
@@ -8,7 +8,8 @@
 #include "asm/current.h"
 #include "linux/cpumask.h"
 
-#define smp_processor_id() (current_thread->cpu)
+#define raw_smp_processor_id() (current_thread->cpu)
+
 #define cpu_logical_map(n) (n)
 #define cpu_number_map(n) (n)
 #define PROC_CHANGE_PENALTY    15 /* Pick a number, any number */
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 96844fecbde8cd1336555a8006aa0bd0e7eb5cbd..a7425aa5a3b72d369ef676c5f1fda8c5dec049b3 100644
@@ -68,7 +68,7 @@ static inline int num_booting_cpus(void)
        return cpus_weight(cpu_callout_map);
 }
 
-#define __smp_processor_id() read_pda(cpunumber)
+#define raw_smp_processor_id() read_pda(cpunumber)
 
 extern __inline int hard_smp_processor_id(void)
 {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e530c6c092f1610535f50f24f689bc45e1bbf7ac..beacd931b606f478cc188ec72bb081fc1f966632 100644
@@ -381,7 +381,7 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
 
 #include <linux/topology.h>
 /* Returns the number of the current Node. */
-#define numa_node_id()         (cpu_to_node(_smp_processor_id()))
+#define numa_node_id()         (cpu_to_node(raw_smp_processor_id()))
 
 #ifndef CONFIG_DISCONTIGMEM
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index dcf1db3b35d338ba459d5b53015671a767892edd..9dfa3ee769ae2c02846a19a8f4a7b727a78baf33 100644
@@ -92,10 +92,7 @@ void smp_prepare_boot_cpu(void);
 /*
  *     These macros fold the SMP functionality into a single CPU system
  */
-
-#if !defined(__smp_processor_id) || !defined(CONFIG_PREEMPT)
-# define smp_processor_id()                    0
-#endif
+#define raw_smp_processor_id()                 0
 #define hard_smp_processor_id()                        0
 #define smp_call_function(func,info,retry,wait)        ({ 0; })
 #define on_each_cpu(func,info,retry,wait)      ({ func(info); 0; })
@@ -106,30 +103,25 @@ static inline void smp_send_reschedule(int cpu) { }
 #endif /* !SMP */
 
 /*
- * DEBUG_PREEMPT support: check whether smp_processor_id() is being
- * used in a preemption-safe way.
+ * smp_processor_id(): get the current CPU ID.
  *
- * An architecture has to enable this debugging code explicitly.
- * It can do so by renaming the smp_processor_id() macro to
- * __smp_processor_id().  This should only be done after some minimal
- * testing, because usually there are a number of false positives
- * that an architecture will trigger.
+ * if DEBUG_PREEMPT is enabled the we check whether it is
+ * used in a preemption-safe way. (smp_processor_id() is safe
+ * if it's used in a preemption-off critical section, or in
+ * a thread that is bound to the current CPU.)
  *
- * To fix a false positive (i.e. smp_processor_id() use that the
- * debugging code reports but which use for some reason is legal),
- * change the smp_processor_id() reference to _smp_processor_id(),
- * which is the nondebug variant.  NOTE: don't use this to hack around
- * real bugs.
+ * NOTE: raw_smp_processor_id() is for internal use only
+ * (smp_processor_id() is the preferred variant), but in rare
+ * instances it might also be used to turn off false positives
+ * (i.e. smp_processor_id() use that the debugging code reports but
+ * which use for some reason is legal). Don't use this to hack around
+ * the warning message, as your code might not work under PREEMPT.
  */
-#ifdef __smp_processor_id
-# if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
-   extern unsigned int smp_processor_id(void);
-# else
-#  define smp_processor_id() __smp_processor_id()
-# endif
-# define _smp_processor_id() __smp_processor_id()
+#ifdef CONFIG_DEBUG_PREEMPT
+  extern unsigned int debug_smp_processor_id(void);
+# define smp_processor_id() debug_smp_processor_id()
 #else
-# define _smp_processor_id() smp_processor_id()
+# define smp_processor_id() raw_smp_processor_id()
 #endif
 
 #define get_cpu()              ({ preempt_disable(); smp_processor_id(); })
diff --git a/include/net/route.h b/include/net/route.h
index d34ca8fc67569ccf2d4af632bb1a08862b359ba1..c3cd069a9aca5f17c0c09b6b6a0f2f7a53be6fa9 100644
@@ -107,7 +107,7 @@ struct rt_cache_stat
 
 extern struct rt_cache_stat *rt_cache_stat;
 #define RT_CACHE_STAT_INC(field)                                         \
-               (per_cpu_ptr(rt_cache_stat, _smp_processor_id())->field++)
+               (per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
 
 extern struct ip_rt_acct *ip_rt_acct;
 
diff --git a/include/net/snmp.h b/include/net/snmp.h
index a15ab256276ed971b4d66c55b093dd3e4818c1bb..a36bed8ea21003b70d6edfcf561fb7ff2b1afc71 100644
@@ -128,18 +128,18 @@ struct linux_mib {
 #define SNMP_STAT_USRPTR(name) (name[1])
 
 #define SNMP_INC_STATS_BH(mib, field)  \
-       (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field]++)
+       (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
 #define SNMP_INC_STATS_OFFSET_BH(mib, field, offset)   \
-       (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field + (offset)]++)
+       (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field + (offset)]++)
 #define SNMP_INC_STATS_USER(mib, field) \
-       (per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field]++)
+       (per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field]++)
 #define SNMP_INC_STATS(mib, field)     \
-       (per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]++)
+       (per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]++)
 #define SNMP_DEC_STATS(mib, field)     \
-       (per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]--)
+       (per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]--)
 #define SNMP_ADD_STATS_BH(mib, field, addend)  \
-       (per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field] += addend)
+       (per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
 #define SNMP_ADD_STATS_USER(mib, field, addend)        \
-       (per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field] += addend)
+       (per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field] += addend)
 
 #endif
diff --git a/kernel/module.c b/kernel/module.c
index 83b3d376708c04f00fdf674622b1c1071045eada..a566745dde621a075e225c52a432316bec58afc0 100644
@@ -379,7 +379,7 @@ static void module_unload_init(struct module *mod)
        for (i = 0; i < NR_CPUS; i++)
                local_set(&mod->ref[i].count, 0);
        /* Hold reference count during initialization. */
-       local_set(&mod->ref[_smp_processor_id()].count, 1);
+       local_set(&mod->ref[raw_smp_processor_id()].count, 1);
        /* Backwards compatibility macros put refcount during init. */
        mod->waiter = current;
 }
diff --git a/kernel/power/smp.c b/kernel/power/smp.c
index cba3584b80fefa88b3fdcb8d938800b08d44abeb..457c2302ed424bf05e516136d21c1884033ca1e9 100644
@@ -48,11 +48,11 @@ void disable_nonboot_cpus(void)
 {
        oldmask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(0));
-       printk("Freezing CPUs (at %d)", _smp_processor_id());
+       printk("Freezing CPUs (at %d)", raw_smp_processor_id());
        current->state = TASK_INTERRUPTIBLE;
        schedule_timeout(HZ);
        printk("...");
-       BUG_ON(_smp_processor_id() != 0);
+       BUG_ON(raw_smp_processor_id() != 0);
 
        /* FIXME: for this to work, all the CPUs must be running
         * "idle" thread (or we deadlock). Is that guaranteed? */
diff --git a/kernel/sched.c b/kernel/sched.c
index f12a0c8a7d98d0cbda8507dd9fa8233acb51f087..deca041fc3645670055502888171811404d249f9 100644
@@ -3814,7 +3814,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-       struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 
        atomic_inc(&rq->nr_iowait);
        schedule();
@@ -3825,7 +3825,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-       struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+       struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
        long ret;
 
        atomic_inc(&rq->nr_iowait);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 6116b25aa7cf32ad98761de21f3219b30cea31cc..84a9d18aa8da0ecb9309c6b58a2b8b694a1493e2 100644
@@ -100,7 +100,7 @@ static int stop_machine(void)
        stopmachine_state = STOPMACHINE_WAIT;
 
        for_each_online_cpu(i) {
-               if (i == _smp_processor_id())
+               if (i == raw_smp_processor_id())
                        continue;
                ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
                if (ret < 0)
@@ -182,7 +182,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 
        /* If they don't care which CPU fn runs on, bind to any online one. */
        if (cpu == NR_CPUS)
-               cpu = _smp_processor_id();
+               cpu = raw_smp_processor_id();
 
        p = kthread_create(do_stop, &smdata, "kstopmachine");
        if (!IS_ERR(p)) {
diff --git a/lib/Makefile b/lib/Makefile
index 9eccea9429a74774ca7c3c0d5a7efc122f2149ff..5f10cb898407c721c8016fad48ca234fedc33fc7 100644
@@ -20,6 +20,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y) 
   lib-y += dec_and_lock.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 99b0ae3d51dded390d221525b2cd2389d845ca8c..bd2bc5d887b815e261ff82c4167e410966b659d3 100644
@@ -9,61 +9,6 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 
-#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
-               defined(CONFIG_DEBUG_PREEMPT)
-
-/*
- * Debugging check.
- */
-unsigned int smp_processor_id(void)
-{
-       unsigned long preempt_count = preempt_count();
-       int this_cpu = __smp_processor_id();
-       cpumask_t this_mask;
-
-       if (likely(preempt_count))
-               goto out;
-
-       if (irqs_disabled())
-               goto out;
-
-       /*
-        * Kernel threads bound to a single CPU can safely use
-        * smp_processor_id():
-        */
-       this_mask = cpumask_of_cpu(this_cpu);
-
-       if (cpus_equal(current->cpus_allowed, this_mask))
-               goto out;
-
-       /*
-        * It is valid to assume CPU-locality during early bootup:
-        */
-       if (system_state != SYSTEM_RUNNING)
-               goto out;
-
-       /*
-        * Avoid recursion:
-        */
-       preempt_disable();
-
-       if (!printk_ratelimit())
-               goto out_enable;
-
-       printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
-       print_symbol("caller is %s\n", (long)__builtin_return_address(0));
-       dump_stack();
-
-out_enable:
-       preempt_enable_no_resched();
-out:
-       return this_cpu;
-}
-
-EXPORT_SYMBOL(smp_processor_id);
-
-#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
-
 #ifdef CONFIG_PREEMPT_BKL
 /*
  * The 'big kernel semaphore'
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
new file mode 100644
index 0000000..42c08ef
--- /dev/null
+++ b/lib/smp_processor_id.c
@@ -0,0 +1,55 @@
+/*
+ * lib/smp_processor_id.c
+ *
+ * DEBUG_PREEMPT variant of smp_processor_id().
+ */
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+
+unsigned int debug_smp_processor_id(void)
+{
+       unsigned long preempt_count = preempt_count();
+       int this_cpu = raw_smp_processor_id();
+       cpumask_t this_mask;
+
+       if (likely(preempt_count))
+               goto out;
+
+       if (irqs_disabled())
+               goto out;
+
+       /*
+        * Kernel threads bound to a single CPU can safely use
+        * smp_processor_id():
+        */
+       this_mask = cpumask_of_cpu(this_cpu);
+
+       if (cpus_equal(current->cpus_allowed, this_mask))
+               goto out;
+
+       /*
+        * It is valid to assume CPU-locality during early bootup:
+        */
+       if (system_state != SYSTEM_RUNNING)
+               goto out;
+
+       /*
+        * Avoid recursion:
+        */
+       preempt_disable();
+
+       if (!printk_ratelimit())
+               goto out_enable;
+
+       printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
+       print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+       dump_stack();
+
+out_enable:
+       preempt_enable_no_resched();
+out:
+       return this_cpu;
+}
+
+EXPORT_SYMBOL(debug_smp_processor_id);
+