Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
Thu, 14 Nov 2013 07:30:30 +0000 (16:30 +0900)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 14 Nov 2013 07:30:30 +0000 (16:30 +0900)
Pull core locking changes from Ingo Molnar:
 "The biggest changes:

   - add lockdep support for seqcount/seqlocks structures, this
     unearthed both bugs and required extra annotation.

   - move the various kernel locking primitives to the new
     kernel/locking/ directory"

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  block: Use u64_stats_init() to initialize seqcounts
  locking/lockdep: Mark __lockdep_count_forward_deps() as static
  lockdep/proc: Fix lock-time avg computation
  locking/doc: Update references to kernel/mutex.c
  ipv6: Fix possible ipv6 seqlock deadlock
  cpuset: Fix potential deadlock w/ set_mems_allowed
  seqcount: Add lockdep functionality to seqcount/seqlock structures
  net: Explicitly initialize u64_stats_sync structures for lockdep
  locking: Move the percpu-rwsem code to kernel/locking/
  locking: Move the lglocks code to kernel/locking/
  locking: Move the rwsem code to kernel/locking/
  locking: Move the rtmutex code to kernel/locking/
  locking: Move the semaphore core to kernel/locking/
  locking: Move the spinlock code to kernel/locking/
  locking: Move the lockdep code to kernel/locking/
  locking: Move the mutex code to kernel/locking/
  hung_task debugging: Add tracepoint to report the hang
  x86/locking/kconfig: Update paravirt spinlock Kconfig description
  lockstat: Report avg wait and hold times
  lockdep, x86/alternatives: Drop ancient lockdep fixup message
  ...

83 files changed:
Documentation/DocBook/kernel-locking.tmpl
Documentation/lockstat.txt
Documentation/mutex-design.txt
arch/x86/Kconfig
arch/x86/kernel/alternative.c
arch/x86/vdso/vclock_gettime.c
block/blk-cgroup.h
block/blk-throttle.c
block/cfq-iosched.c
drivers/net/dummy.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ifb.c
drivers/net/loopback.c
drivers/net/macvlan.c
drivers/net/nlmon.c
drivers/net/team/team.c
drivers/net/team/team_mode_loadbalance.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/xen-netfront.c
fs/dcache.c
fs/fs_struct.c
include/linux/cpuset.h
include/linux/init_task.h
include/linux/lockdep.h
include/linux/mutex.h
include/linux/sched/sysctl.h
include/linux/seqlock.h
include/linux/u64_stats_sync.h
include/trace/events/sched.h
kernel/Makefile
kernel/futex.c
kernel/hung_task.c
kernel/locking/Makefile [new file with mode: 0644]
kernel/locking/lglock.c [moved from kernel/lglock.c with 100% similarity]
kernel/locking/lockdep.c [moved from kernel/lockdep.c with 99% similarity]
kernel/locking/lockdep_internals.h [moved from kernel/lockdep_internals.h with 100% similarity]
kernel/locking/lockdep_proc.c [moved from kernel/lockdep_proc.c with 97% similarity]
kernel/locking/lockdep_states.h [moved from kernel/lockdep_states.h with 100% similarity]
kernel/locking/mutex-debug.c [moved from kernel/mutex-debug.c with 100% similarity]
kernel/locking/mutex-debug.h [moved from kernel/mutex-debug.h with 100% similarity]
kernel/locking/mutex.c [moved from kernel/mutex.c with 99% similarity]
kernel/locking/mutex.h [moved from kernel/mutex.h with 100% similarity]
kernel/locking/percpu-rwsem.c [moved from lib/percpu-rwsem.c with 100% similarity]
kernel/locking/rtmutex-debug.c [moved from kernel/rtmutex-debug.c with 100% similarity]
kernel/locking/rtmutex-debug.h [moved from kernel/rtmutex-debug.h with 100% similarity]
kernel/locking/rtmutex-tester.c [moved from kernel/rtmutex-tester.c with 100% similarity]
kernel/locking/rtmutex.c [moved from kernel/rtmutex.c with 100% similarity]
kernel/locking/rtmutex.h [moved from kernel/rtmutex.h with 100% similarity]
kernel/locking/rtmutex_common.h [moved from kernel/rtmutex_common.h with 100% similarity]
kernel/locking/rwsem-spinlock.c [moved from lib/rwsem-spinlock.c with 100% similarity]
kernel/locking/rwsem-xadd.c [moved from lib/rwsem.c with 100% similarity]
kernel/locking/rwsem.c [moved from kernel/rwsem.c with 100% similarity]
kernel/locking/semaphore.c [moved from kernel/semaphore.c with 100% similarity]
kernel/locking/spinlock.c [moved from kernel/spinlock.c with 100% similarity]
kernel/locking/spinlock_debug.c [moved from lib/spinlock_debug.c with 100% similarity]
kernel/rcu/tree_plugin.h
kernel/sysctl.c
lib/Makefile
mm/filemap_xip.c
net/8021q/vlan_dev.c
net/bridge/br_device.c
net/ipv4/af_inet.c
net/ipv4/ip_tunnel.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/sit.c
net/netfilter/ipvs/ip_vs_ctl.c
net/openvswitch/datapath.c
net/openvswitch/vport.c

index 09e884e5b9f53d1ff32bbc785c61dcb6ec5e56e4..19f2a5a5a5b49702777bfde41d27cb5a6482706e 100644 (file)
@@ -1958,7 +1958,7 @@ machines due to caching.
   <chapter id="apiref-mutex">
    <title>Mutex API reference</title>
 !Iinclude/linux/mutex.h
-!Ekernel/mutex.c
+!Ekernel/locking/mutex.c
   </chapter>
 
   <chapter id="apiref-futex">
index dd2f7b26ca3077737dd93ee97217248216b1878d..72d010689751b3cc7c60349342ede7cd3f8faf49 100644 (file)
@@ -46,16 +46,14 @@ With these hooks we provide the following statistics:
  contentions       - number of lock acquisitions that had to wait
  wait time min     - shortest (non-0) time we ever had to wait for a lock
            max     - longest time we ever had to wait for a lock
-           total   - total time we spend waiting on this lock
+          total   - total time we spend waiting on this lock
+          avg     - average time spent waiting on this lock
  acq-bounces       - number of lock acquisitions that involved x-cpu data
  acquisitions      - number of times we took the lock
  hold time min     - shortest (non-0) time we ever held the lock
-           max     - longest time we ever held the lock
-           total   - total time this lock was held
-
-From these number various other statistics can be derived, such as:
-
- hold time average = hold time total / acquisitions
+          max     - longest time we ever held the lock
+          total   - total time this lock was held
+          avg     - average time this lock was held
 
 These numbers are gathered per lock class, per read/write state (when
 applicable).
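
As the new columns suggest, the avg figures appear to be simple ratios of fields that were already reported: waittime-avg = waittime-total / contentions, and holdtime-avg = holdtime-total / acquisitions. Taking the &rq->lock sample shown further down as a worked example, 103881.26 / 13128 ≈ 7.91 matches the reported waittime-avg, and 13224683.11 / 3453404 ≈ 3.82 matches the holdtime-avg.
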
@@ -84,37 +82,38 @@ Look at the current lock statistics:
 
 # less /proc/lock_stat
 
-01 lock_stat version 0.3
-02 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-03                               class name    con-bounces    contentions   waittime-min   waittime-max waittime-total    acq-bounces   acquisitions   holdtime-min   holdtime-max holdtime-total
-04 -----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+01 lock_stat version 0.4
+02-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+03                              class name    con-bounces    contentions   waittime-min   waittime-max waittime-total   waittime-avg    acq-bounces   acquisitions   holdtime-min   holdtime-max holdtime-total   holdtime-avg
+04-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 05
-06                          &mm->mmap_sem-W:           233            538 18446744073708       22924.27      607243.51           1342          45806           1.71        8595.89     1180582.34
-07                          &mm->mmap_sem-R:           205            587 18446744073708       28403.36      731975.00           1940         412426           0.58      187825.45     6307502.88
-08                          ---------------
-09                            &mm->mmap_sem            487          [<ffffffff8053491f>] do_page_fault+0x466/0x928
-10                            &mm->mmap_sem            179          [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
-11                            &mm->mmap_sem            279          [<ffffffff80210a57>] sys_mmap+0x75/0xce
-12                            &mm->mmap_sem             76          [<ffffffff802a490b>] sys_munmap+0x32/0x59
-13                          ---------------
-14                            &mm->mmap_sem            270          [<ffffffff80210a57>] sys_mmap+0x75/0xce
-15                            &mm->mmap_sem            431          [<ffffffff8053491f>] do_page_fault+0x466/0x928
-16                            &mm->mmap_sem            138          [<ffffffff802a490b>] sys_munmap+0x32/0x59
-17                            &mm->mmap_sem            145          [<ffffffff802a6200>] sys_mprotect+0xcd/0x21d
+06                         &mm->mmap_sem-W:            46             84           0.26         939.10       16371.53         194.90          47291        2922365           0.16     2220301.69 17464026916.32        5975.99
+07                         &mm->mmap_sem-R:            37            100           1.31      299502.61      325629.52        3256.30         212344       34316685           0.10        7744.91    95016910.20           2.77
+08                         ---------------
+09                           &mm->mmap_sem              1          [<ffffffff811502a7>] khugepaged_scan_mm_slot+0x57/0x280
+10                           &mm->mmap_sem             96          [<ffffffff815351c4>] __do_page_fault+0x1d4/0x510
+11                           &mm->mmap_sem             34          [<ffffffff81113d77>] vm_mmap_pgoff+0x87/0xd0
+12                           &mm->mmap_sem             17          [<ffffffff81127e71>] vm_munmap+0x41/0x80
+13                         ---------------
+14                           &mm->mmap_sem              1          [<ffffffff81046fda>] dup_mmap+0x2a/0x3f0
+15                           &mm->mmap_sem             60          [<ffffffff81129e29>] SyS_mprotect+0xe9/0x250
+16                           &mm->mmap_sem             41          [<ffffffff815351c4>] __do_page_fault+0x1d4/0x510
+17                           &mm->mmap_sem             68          [<ffffffff81113d77>] vm_mmap_pgoff+0x87/0xd0
 18
-19 ...............................................................................................................................................................................................
+19.............................................................................................................................................................................................................................
 20
-21                              dcache_lock:           621            623           0.52         118.26        1053.02           6745          91930           0.29         316.29      118423.41
-22                              -----------
-23                              dcache_lock            179          [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
-24                              dcache_lock            113          [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
-25                              dcache_lock             99          [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
-26                              dcache_lock            104          [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
-27                              -----------
-28                              dcache_lock            192          [<ffffffff80378274>] _atomic_dec_and_lock+0x34/0x54
-29                              dcache_lock             98          [<ffffffff802ca0dc>] d_rehash+0x1b/0x44
-30                              dcache_lock             72          [<ffffffff802cc17b>] d_alloc+0x19a/0x1eb
-31                              dcache_lock            112          [<ffffffff802cbca0>] d_instantiate+0x36/0x8a
+21                         unix_table_lock:           110            112           0.21          49.24         163.91           1.46          21094          66312           0.12         624.42       31589.81           0.48
+22                         ---------------
+23                         unix_table_lock             45          [<ffffffff8150ad8e>] unix_create1+0x16e/0x1b0
+24                         unix_table_lock             47          [<ffffffff8150b111>] unix_release_sock+0x31/0x250
+25                         unix_table_lock             15          [<ffffffff8150ca37>] unix_find_other+0x117/0x230
+26                         unix_table_lock              5          [<ffffffff8150a09f>] unix_autobind+0x11f/0x1b0
+27                         ---------------
+28                         unix_table_lock             39          [<ffffffff8150b111>] unix_release_sock+0x31/0x250
+29                         unix_table_lock             49          [<ffffffff8150ad8e>] unix_create1+0x16e/0x1b0
+30                         unix_table_lock             20          [<ffffffff8150ca37>] unix_find_other+0x117/0x230
+31                         unix_table_lock              4          [<ffffffff8150a09f>] unix_autobind+0x11f/0x1b0
+
 
 This excerpt shows the first two lock class statistics. Line 01 shows the
 output version - each time the format changes this will be updated. Line 02-04
@@ -131,30 +130,30 @@ The integer part of the time values is in us.
 
 Dealing with nested locks, subclasses may appear:
 
-32...............................................................................................................................................................................................
+32...........................................................................................................................................................................................................................
 33
-34                               &rq->lock:         13128          13128           0.43         190.53      103881.26          97454        3453404           0.00         401.11    13224683.11
+34                               &rq->lock:       13128          13128           0.43         190.53      103881.26           7.91          97454        3453404           0.00         401.11    13224683.11           3.82
 35                               ---------
-36                               &rq->lock            645          [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
-37                               &rq->lock            297          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
-38                               &rq->lock            360          [<ffffffff8103c4c5>] select_task_rq_fair+0x1f0/0x74a
-39                               &rq->lock            428          [<ffffffff81045f98>] scheduler_tick+0x46/0x1fb
+36                               &rq->lock          645          [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+37                               &rq->lock          297          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+38                               &rq->lock          360          [<ffffffff8103c4c5>] select_task_rq_fair+0x1f0/0x74a
+39                               &rq->lock          428          [<ffffffff81045f98>] scheduler_tick+0x46/0x1fb
 40                               ---------
-41                               &rq->lock             77          [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
-42                               &rq->lock            174          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
-43                               &rq->lock           4715          [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
-44                               &rq->lock            893          [<ffffffff81340524>] schedule+0x157/0x7b8
+41                               &rq->lock           77          [<ffffffff8103bfc4>] task_rq_lock+0x43/0x75
+42                               &rq->lock          174          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+43                               &rq->lock         4715          [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+44                               &rq->lock          893          [<ffffffff81340524>] schedule+0x157/0x7b8
 45
-46...............................................................................................................................................................................................
+46...........................................................................................................................................................................................................................
 47
-48                             &rq->lock/1:         11526          11488           0.33         388.73      136294.31          21461          38404           0.00          37.93      109388.53
+48                             &rq->lock/1:        1526          11488           0.33         388.73      136294.31          11.86          21461          38404           0.00          37.93      109388.53           2.84
 49                             -----------
-50                             &rq->lock/1          11526          [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+50                             &rq->lock/1        11526          [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
 51                             -----------
-52                             &rq->lock/1           5645          [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
-53                             &rq->lock/1           1224          [<ffffffff81340524>] schedule+0x157/0x7b8
-54                             &rq->lock/1           4336          [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
-55                             &rq->lock/1            181          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
+52                             &rq->lock/1         5645          [<ffffffff8103ed4b>] double_rq_lock+0x42/0x54
+53                             &rq->lock/1         1224          [<ffffffff81340524>] schedule+0x157/0x7b8
+54                             &rq->lock/1         4336          [<ffffffff8103ed58>] double_rq_lock+0x4f/0x54
+55                             &rq->lock/1          181          [<ffffffff8104ba65>] try_to_wake_up+0x127/0x25a
 
 Line 48 shows statistics for the second subclass (/1) of &rq->lock class
 (subclass starts from 0), since in this case, as line 50 suggests,
@@ -163,16 +162,16 @@ double_rq_lock actually acquires a nested lock of two spinlocks.
 View the top contending locks:
 
 # grep : /proc/lock_stat | head
-              &inode->i_data.tree_lock-W:            15          21657           0.18     1093295.30 11547131054.85             58          10415           0.16          87.51        6387.60
-              &inode->i_data.tree_lock-R:             0              0           0.00           0.00           0.00          23302         231198           0.25           8.45       98023.38
-                             dcache_lock:          1037           1161           0.38          45.32         774.51           6611         243371           0.15         306.48       77387.24
-                         &inode->i_mutex:           161            286 18446744073709       62882.54     1244614.55           3653          20598 18446744073709       62318.60     1693822.74
-                         &zone->lru_lock:            94             94           0.53           7.33          92.10           4366          32690           0.29          59.81       16350.06
-              &inode->i_data.i_mmap_mutex:            79             79           0.40           3.77          53.03          11779          87755           0.28         116.93       29898.44
-                        &q->__queue_lock:            48             50           0.52          31.62          86.31            774          13131           0.17         113.08       12277.52
-                        &rq->rq_lock_key:            43             47           0.74          68.50         170.63           3706          33929           0.22         107.99       17460.62
-                      &rq->rq_lock_key#2:            39             46           0.75           6.68          49.03           2979          32292           0.17         125.17       17137.63
-                         tasklist_lock-W:            15             15           1.45          10.87          32.70           1201           7390           0.58          62.55       13648.47
+                       clockevents_lock:       2926159        2947636           0.15       46882.81  1784540466.34         605.41        3381345        3879161           0.00        2260.97    53178395.68          13.71
+                    tick_broadcast_lock:        346460         346717           0.18        2257.43    39364622.71         113.54        3642919        4242696           0.00        2263.79    49173646.60          11.59
+                 &mapping->i_mmap_mutex:        203896         203899           3.36      645530.05 31767507988.39      155800.21        3361776        8893984           0.17        2254.15    14110121.02           1.59
+                              &rq->lock:        135014         136909           0.18         606.09      842160.68           6.15        1540728       10436146           0.00         728.72    17606683.41           1.69
+              &(&zone->lru_lock)->rlock:         93000          94934           0.16          59.18      188253.78           1.98        1199912        3809894           0.15         391.40     3559518.81           0.93
+                        tasklist_lock-W:         40667          41130           0.23        1189.42      428980.51          10.43         270278         510106           0.16         653.51     3939674.91           7.72
+                        tasklist_lock-R:         21298          21305           0.20        1310.05      215511.12          10.12         186204         241258           0.14        1162.33     1179779.23           4.89
+                             rcu_node_1:         47656          49022           0.16         635.41      193616.41           3.95         844888        1865423           0.00         764.26     1656226.96           0.89
+       &(&dentry->d_lockref.lock)->rlock:         39791          40179           0.15        1302.08       88851.96           2.21        2790851       12527025           0.10        1910.75     3379714.27           0.27
+                             rcu_node_0:         29203          30064           0.16         786.55     1555573.00          51.74          88963         244254           0.00         398.87      428872.51           1.76
 
 Clear the statistics:
 
index 38c10fd7f4110448facd7089b985c4776d264d85..1dfe62c3641d5d9087388c0255f2a9775b520faa 100644 (file)
@@ -116,11 +116,11 @@ using mutexes at the moment, please let me know if you find any. ]
 Implementation of mutexes
 -------------------------
 
-'struct mutex' is the new mutex type, defined in include/linux/mutex.h
-and implemented in kernel/mutex.c. It is a counter-based mutex with a
-spinlock and a wait-list. The counter has 3 states: 1 for "unlocked",
-0 for "locked" and negative numbers (usually -1) for "locked, potential
-waiters queued".
+'struct mutex' is the new mutex type, defined in include/linux/mutex.h and
+implemented in kernel/locking/mutex.c. It is a counter-based mutex with a
+spinlock and a wait-list. The counter has 3 states: 1 for "unlocked", 0 for
+"locked" and negative numbers (usually -1) for "locked, potential waiters
+queued".
 
 the APIs of 'struct mutex' have been streamlined:
 
index 14dc9c797abb40472cd7958042232b57c327584e..6e3e1cb3f6a0030e5344147b8ceeae684d637ad9 100644 (file)
@@ -635,10 +635,10 @@ config PARAVIRT_SPINLOCKS
          spinlock implementation with something virtualization-friendly
          (for example, block the virtual CPU rather than spinning).
 
-         Unfortunately the downside is an up to 5% performance hit on
-         native kernels, with various workloads.
+         It has a minimal impact on native kernels and gives a nice performance
+         benefit on paravirtualized KVM / Xen kernels.
 
-         If you are unsure how to answer this question, answer N.
+         If you are unsure how to answer this question, answer Y.
 
 source "arch/x86/xen/Kconfig"
 
index 15e8563e5c244e0712c9696a3367067044a300b3..df94598ad05a845902e9897214cdceacc779a80d 100644 (file)
@@ -402,17 +402,6 @@ void alternatives_enable_smp(void)
 {
        struct smp_alt_module *mod;
 
-#ifdef CONFIG_LOCKDEP
-       /*
-        * Older binutils section handling bug prevented
-        * alternatives-replacement from working reliably.
-        *
-        * If this still occurs then you should see a hang
-        * or crash shortly after this line:
-        */
-       pr_info("lockdep: fixing up alternatives\n");
-#endif
-
        /* Why bother if there are no other CPUs? */
        BUG_ON(num_possible_cpus() == 1);
 
index 72074d5284009a35265580a13eaa99e6f11588b4..2ada505067cceca9001bda33bb6aa66941d34b0d 100644 (file)
@@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts)
 
        ts->tv_nsec = 0;
        do {
-               seq = read_seqcount_begin(&gtod->seq);
+               seq = read_seqcount_begin_no_lockdep(&gtod->seq);
                mode = gtod->clock.vclock_mode;
                ts->tv_sec = gtod->wall_time_sec;
                ns = gtod->wall_time_snsec;
@@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts)
 
        ts->tv_nsec = 0;
        do {
-               seq = read_seqcount_begin(&gtod->seq);
+               seq = read_seqcount_begin_no_lockdep(&gtod->seq);
                mode = gtod->clock.vclock_mode;
                ts->tv_sec = gtod->monotonic_time_sec;
                ns = gtod->monotonic_time_snsec;
@@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts)
 {
        unsigned long seq;
        do {
-               seq = read_seqcount_begin(&gtod->seq);
+               seq = read_seqcount_begin_no_lockdep(&gtod->seq);
                ts->tv_sec = gtod->wall_time_coarse.tv_sec;
                ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts)
 {
        unsigned long seq;
        do {
-               seq = read_seqcount_begin(&gtod->seq);
+               seq = read_seqcount_begin_no_lockdep(&gtod->seq);
                ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
                ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
index ae6969a7ffd4aa9bebd9373c13a73a503e3b4042..1610b22edf0992da4cdeb1657688527d5867e76c 100644 (file)
@@ -402,6 +402,11 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
 #define blk_queue_for_each_rl(rl, q)   \
        for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
 
+static inline void blkg_stat_init(struct blkg_stat *stat)
+{
+       u64_stats_init(&stat->syncp);
+}
+
 /**
  * blkg_stat_add - add a value to a blkg_stat
  * @stat: target blkg_stat
@@ -458,6 +463,11 @@ static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
        blkg_stat_add(to, blkg_stat_read(from));
 }
 
+static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
+{
+       u64_stats_init(&rwstat->syncp);
+}
+
 /**
  * blkg_rwstat_add - add a value to a blkg_rwstat
  * @rwstat: target blkg_rwstat
index 8331aba9426f2c75ffb29ff65a3b89effa6b5262..06534049afbac1eb9d1c9ba77f7fc8369150a139 100644 (file)
@@ -256,6 +256,12 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
        }                                                               \
 } while (0)
 
+static void tg_stats_init(struct tg_stats_cpu *tg_stats)
+{
+       blkg_rwstat_init(&tg_stats->service_bytes);
+       blkg_rwstat_init(&tg_stats->serviced);
+}
+
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
  * system_wq once there are some groups on the alloc_list waiting for
@@ -269,12 +275,16 @@ static void tg_stats_alloc_fn(struct work_struct *work)
 
 alloc_stats:
        if (!stats_cpu) {
+               int cpu;
+
                stats_cpu = alloc_percpu(struct tg_stats_cpu);
                if (!stats_cpu) {
                        /* allocation failed, try again after some time */
                        schedule_delayed_work(dwork, msecs_to_jiffies(10));
                        return;
                }
+               for_each_possible_cpu(cpu)
+                       tg_stats_init(per_cpu_ptr(stats_cpu, cpu));
        }
 
        spin_lock_irq(&tg_stats_alloc_lock);
index 434944cbd761884f0f6d9ffdfe96adc00afd1e5c..4d5cec1ad80d3e64314f43a84577a79d9e5d2506 100644 (file)
@@ -1508,6 +1508,29 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg)
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void cfqg_stats_init(struct cfqg_stats *stats)
+{
+       blkg_rwstat_init(&stats->service_bytes);
+       blkg_rwstat_init(&stats->serviced);
+       blkg_rwstat_init(&stats->merged);
+       blkg_rwstat_init(&stats->service_time);
+       blkg_rwstat_init(&stats->wait_time);
+       blkg_rwstat_init(&stats->queued);
+
+       blkg_stat_init(&stats->sectors);
+       blkg_stat_init(&stats->time);
+
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+       blkg_stat_init(&stats->unaccounted_time);
+       blkg_stat_init(&stats->avg_queue_size_sum);
+       blkg_stat_init(&stats->avg_queue_size_samples);
+       blkg_stat_init(&stats->dequeue);
+       blkg_stat_init(&stats->group_wait_time);
+       blkg_stat_init(&stats->idle_time);
+       blkg_stat_init(&stats->empty_time);
+#endif
+}
+
 static void cfq_pd_init(struct blkcg_gq *blkg)
 {
        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
@@ -1515,6 +1538,8 @@ static void cfq_pd_init(struct blkcg_gq *blkg)
        cfq_init_cfqg_base(cfqg);
        cfqg->weight = blkg->blkcg->cfq_weight;
        cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
+       cfqg_stats_init(&cfqg->stats);
+       cfqg_stats_init(&cfqg->dead_stats);
 }
 
 static void cfq_pd_offline(struct blkcg_gq *blkg)
index b710c6b2d65962db616017a6572cf75e425d69a1..bd8f84b0b894ebfe616a85b365598347d648200f 100644 (file)
@@ -88,10 +88,16 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int dummy_dev_init(struct net_device *dev)
 {
+       int i;
        dev->dstats = alloc_percpu(struct pcpu_dstats);
        if (!dev->dstats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct pcpu_dstats *dstats;
+               dstats = per_cpu_ptr(dev->dstats, i);
+               u64_stats_init(&dstats->syncp);
+       }
        return 0;
 }
 
index cb2bb6fccbc88102607a9ad97be27058fbc77fd1..eaecaadfa8c56436994c2afbe9a70223467ceec2 100644 (file)
@@ -2148,6 +2148,9 @@ static int be_tx_qs_create(struct be_adapter *adapter)
                if (status)
                        return status;
 
+               u64_stats_init(&txo->stats.sync);
+               u64_stats_init(&txo->stats.sync_compl);
+
                /* If num_evt_qs is less than num_tx_qs, then more than
                 * one txq share an eq
                 */
@@ -2209,6 +2212,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
                if (rc)
                        return rc;
 
+               u64_stats_init(&rxo->stats.sync);
                eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
                rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
                if (rc)
index 2ac14bdd5fbbd0d3fd08d7689094761be253348a..025e5f4b7481d680f6b2dff0491a857615af250b 100644 (file)
@@ -1224,6 +1224,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
                ring->count = adapter->tx_ring_count;
                ring->queue_index = txr_idx;
 
+               u64_stats_init(&ring->tx_syncp);
+               u64_stats_init(&ring->tx_syncp2);
+
                /* assign ring to adapter */
                adapter->tx_ring[txr_idx] = ring;
 
@@ -1257,6 +1260,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
                ring->count = adapter->rx_ring_count;
                ring->queue_index = rxr_idx;
 
+               u64_stats_init(&ring->rx_syncp);
+
                /* assign ring to adapter */
                adapter->rx_ring[rxr_idx] = ring;
        }
index 0066f0aefbfa8300d39b0cb2a58822d58dd3e8d4..0c55079ebee37c03d0a5a9a5cbdc6681d78c19a9 100644 (file)
@@ -5085,6 +5085,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
        if (!tx_ring->tx_buffer_info)
                goto err;
 
+       u64_stats_init(&tx_ring->syncp);
+
        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
@@ -5167,6 +5169,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
        if (!rx_ring->rx_buffer_info)
                goto err;
 
+       u64_stats_init(&rx_ring->syncp);
+
        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
index 7d99e695a1106ff1ab74f00c4edd8363b4b7e2c6..b8e232b4ea2da88aca164039f81f1852ba9f779e 100644 (file)
@@ -2792,6 +2792,9 @@ static int mvneta_probe(struct platform_device *pdev)
 
        pp = netdev_priv(dev);
 
+       u64_stats_init(&pp->tx_stats.syncp);
+       u64_stats_init(&pp->rx_stats.syncp);
+
        pp->weight = MVNETA_RX_POLL_WEIGHT;
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;
index a7df981d2123a9d0c73180fbc4fc70834adacd98..43aa7acd84a624705f4fd1430c8726839e468ef2 100644 (file)
@@ -4763,6 +4763,9 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
        sky2->hw = hw;
        sky2->msg_enable = netif_msg_init(debug, default_msg);
 
+       u64_stats_init(&sky2->tx_stats.syncp);
+       u64_stats_init(&sky2->rx_stats.syncp);
+
        /* Auto speed and flow control */
        sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
        if (hw->chip_id != CHIP_ID_YUKON_XL)
index 8614eeb7de8140114d256ae05c62ce95d5844a18..f9876ea8c8bfd75b9e53214f56ba624cb55396b4 100644 (file)
@@ -2072,6 +2072,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
                                vdev->config.tx_steering_type;
                        vpath->fifo.ndev = vdev->ndev;
                        vpath->fifo.pdev = vdev->pdev;
+
+                       u64_stats_init(&vpath->fifo.stats.syncp);
+                       u64_stats_init(&vpath->ring.stats.syncp);
+
                        if (vdev->config.tx_steering_type)
                                vpath->fifo.txq =
                                        netdev_get_tx_queue(vdev->ndev, i);
index 098b96dad66f901582c9d147416f7e2a2448d861..2d045be4b5cf64a5921a6687b127609093df376e 100644 (file)
@@ -5619,6 +5619,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
        spin_lock_init(&np->lock);
        spin_lock_init(&np->hwstats_lock);
        SET_NETDEV_DEV(dev, &pci_dev->dev);
+       u64_stats_init(&np->swstats_rx_syncp);
+       u64_stats_init(&np->swstats_tx_syncp);
 
        init_timer(&np->oom_kick);
        np->oom_kick.data = (unsigned long) dev;
index 50a92104dd0a65399f118b6c6505bc04e04329af..da5972eefdd2bfc5d702fd553cf68b91c5485fb5 100644 (file)
@@ -790,6 +790,9 @@ static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
 
        pci_set_master (pdev);
 
+       u64_stats_init(&tp->rx_stats.syncp);
+       u64_stats_init(&tp->tx_stats.syncp);
+
 retry:
        /* PIO bar register comes first. */
        bar = !use_io;
index 106be47716e796105012efd7b8d644929faaccef..edb2e12a0fe214894e9a9a0445ccc7e869dbe7d6 100644 (file)
@@ -1008,6 +1008,8 @@ static void tile_net_register(void *dev_ptr)
        info->egress_timer.data = (long)info;
        info->egress_timer.function = tile_net_handle_egress_timer;
 
+       u64_stats_init(&info->stats.syncp);
+
        priv->cpu[my_cpu] = info;
 
        /*
index 4a7293ed95e9c124b84ec87cabf1a9c1ef6ab1f9..cce6c4bc556a97e03fbe5f1648396aed6bf7070a 100644 (file)
@@ -987,6 +987,9 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        rp->base = ioaddr;
 
+       u64_stats_init(&rp->tx_stats.syncp);
+       u64_stats_init(&rp->rx_stats.syncp);
+
        /* Get chip registers into a sane state */
        rhine_power_init(dev);
        rhine_hw_init(dev, pioaddr);
index a3bed28197d29bf9722b6c586f311827d357b501..c14d39bf32d06a6f8d1b42a29b36dcd84b25c8f6 100644 (file)
@@ -265,6 +265,7 @@ MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 static int __init ifb_init_one(int index)
 {
        struct net_device *dev_ifb;
+       struct ifb_private *dp;
        int err;
 
        dev_ifb = alloc_netdev(sizeof(struct ifb_private),
@@ -273,6 +274,10 @@ static int __init ifb_init_one(int index)
        if (!dev_ifb)
                return -ENOMEM;
 
+       dp = netdev_priv(dev_ifb);
+       u64_stats_init(&dp->rsync);
+       u64_stats_init(&dp->tsync);
+
        dev_ifb->rtnl_link_ops = &ifb_link_ops;
        err = register_netdevice(dev_ifb);
        if (err < 0)
index a17d85a331f1ade54f9ec1834f1e8ee1c67d9b9d..ac24c27b4b2da10195b965cd3cfe66ceb8791c00 100644 (file)
@@ -137,10 +137,16 @@ static const struct ethtool_ops loopback_ethtool_ops = {
 
 static int loopback_dev_init(struct net_device *dev)
 {
+       int i;
        dev->lstats = alloc_percpu(struct pcpu_lstats);
        if (!dev->lstats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct pcpu_lstats *lb_stats;
+               lb_stats = per_cpu_ptr(dev->lstats, i);
+               u64_stats_init(&lb_stats->syncp);
+       }
        return 0;
 }
 
index af4aaa5893ff5a23a8d0014ab21403eb3177783f..acf93798dc675929394e82ffed96cd1616c3a31d 100644 (file)
@@ -534,6 +534,7 @@ static int macvlan_init(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
        const struct net_device *lowerdev = vlan->lowerdev;
+       int i;
 
        dev->state              = (dev->state & ~MACVLAN_STATE_MASK) |
                                  (lowerdev->state & MACVLAN_STATE_MASK);
@@ -549,6 +550,12 @@ static int macvlan_init(struct net_device *dev)
        if (!vlan->pcpu_stats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct macvlan_pcpu_stats *mvlstats;
+               mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
+               u64_stats_init(&mvlstats->syncp);
+       }
+
        return 0;
 }
 
index b57ce5f4896217e30a41cf7951d7dd5f36c90319..d2bb12bfabd5501055dfd52760c19c598b254bc9 100644 (file)
@@ -47,8 +47,16 @@ static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
 
 static int nlmon_dev_init(struct net_device *dev)
 {
+       int i;
+
        dev->lstats = alloc_percpu(struct pcpu_lstats);
 
+       for_each_possible_cpu(i) {
+               struct pcpu_lstats *nlmstats;
+               nlmstats = per_cpu_ptr(dev->lstats, i);
+               u64_stats_init(&nlmstats->syncp);
+       }
+
        return dev->lstats == NULL ? -ENOMEM : 0;
 }
 
index 50e43e64d51defbf84a399bdcfa048951ab2fdba..6574eb8766f90997c38d0ea56d67ad542ecf99b3 100644 (file)
@@ -1540,6 +1540,12 @@ static int team_init(struct net_device *dev)
        if (!team->pcpu_stats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct team_pcpu_stats *team_stats;
+               team_stats = per_cpu_ptr(team->pcpu_stats, i);
+               u64_stats_init(&team_stats->syncp);
+       }
+
        for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&team->en_port_hlist[i]);
        INIT_LIST_HEAD(&team->port_list);
index 829a9cd2b4dac5257f2deb8eabd39c0210ffc8bc..d671fc3ac5ac26ad2b7666617fab5adc1042e569 100644 (file)
@@ -570,7 +570,7 @@ static int lb_init(struct team *team)
 {
        struct lb_priv *lb_priv = get_lb_priv(team);
        lb_select_tx_port_func_t *func;
-       int err;
+       int i, err;
 
        /* set default tx port selector */
        func = lb_select_tx_port_get_func("hash");
@@ -588,6 +588,13 @@ static int lb_init(struct team *team)
                goto err_alloc_pcpu_stats;
        }
 
+       for_each_possible_cpu(i) {
+               struct lb_pcpu_stats *team_lb_stats;
+               team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
+               u64_stats_init(&team_lb_stats->syncp);
+       }
+
+
        INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);
 
        err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
index b24db7acbf1207e6e1d905111cf5a59108744743..2ec2041b62d4eb215bf23f74ad82ba44332b8d3d 100644 (file)
@@ -235,10 +235,18 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
 
 static int veth_dev_init(struct net_device *dev)
 {
+       int i;
+
        dev->vstats = alloc_percpu(struct pcpu_vstats);
        if (!dev->vstats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct pcpu_vstats *veth_stats;
+               veth_stats = per_cpu_ptr(dev->vstats, i);
+               u64_stats_init(&veth_stats->syncp);
+       }
+
        return 0;
 }
 
index 01f4eb5c8b786aca792cd42b8e87568ced1413d1..bf7c734259ad640f235b3be752b8f9f38e59fd80 100644 (file)
@@ -1576,6 +1576,13 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (vi->stats == NULL)
                goto free;
 
+       for_each_possible_cpu(i) {
+               struct virtnet_stats *virtnet_stats;
+               virtnet_stats = per_cpu_ptr(vi->stats, i);
+               u64_stats_init(&virtnet_stats->tx_syncp);
+               u64_stats_init(&virtnet_stats->rx_syncp);
+       }
+
        mutex_init(&vi->config_lock);
        vi->config_enable = true;
        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
index 78df8f39e57cec7e9280811005b4a417b45ff35d..0358c07f7669142034e089660425a8fdb88236e2 100644 (file)
@@ -1880,11 +1880,19 @@ static int vxlan_init(struct net_device *dev)
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
        struct vxlan_sock *vs;
+       int i;
 
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct pcpu_tstats *vxlan_stats;
+               vxlan_stats = per_cpu_ptr(dev->tstats, i);
+               u64_stats_init(&vxlan_stats->syncp);
+       }
+
+
        spin_lock(&vn->sock_lock);
        vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
        if (vs) {
index dd1011e55cb598096ef7c2ad64981a2572b760d7..d85e66979711cbe62a168d9012f1a40ed2d5c405 100644 (file)
@@ -1340,6 +1340,12 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        if (np->stats == NULL)
                goto exit;
 
+       for_each_possible_cpu(i) {
+               struct netfront_stats *xen_nf_stats;
+               xen_nf_stats = per_cpu_ptr(np->stats, i);
+               u64_stats_init(&xen_nf_stats->syncp);
+       }
+
        /* Initialise tx_skbs as a free chain containing every entry. */
        np->tx_skb_freelist = 0;
        for (i = 0; i < NET_TX_RING_SIZE; i++) {
index a9dd384c5e8039e5707789128160600b6e4ed3e6..0a38ef8d7f0088579089d101c99a0e12193f22f5 100644 (file)
@@ -2606,7 +2606,7 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
        dentry_lock_for_move(dentry, target);
 
        write_seqcount_begin(&dentry->d_seq);
-       write_seqcount_begin(&target->d_seq);
+       write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
 
        /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
 
@@ -2738,7 +2738,7 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
        dentry_lock_for_move(anon, dentry);
 
        write_seqcount_begin(&dentry->d_seq);
-       write_seqcount_begin(&anon->d_seq);
+       write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);
 
        dparent = dentry->d_parent;
 
index d8ac61d0c9320ab194c9df90a1eaf3c858a55c0f..7dca743b2ce1c8796155a14c3d3a83023eed3de4 100644 (file)
@@ -161,6 +161,6 @@ EXPORT_SYMBOL(current_umask);
 struct fs_struct init_fs = {
        .users          = 1,
        .lock           = __SPIN_LOCK_UNLOCKED(init_fs.lock),
-       .seq            = SEQCNT_ZERO,
+       .seq            = SEQCNT_ZERO(init_fs.seq),
        .umask          = 0022,
 };
index cc1b01cf2035bced50bfc5d0015d1b6de7a7781b..3fe661fe96d13e534f2b1ece47a409b1609f055c 100644 (file)
@@ -110,10 +110,14 @@ static inline bool put_mems_allowed(unsigned int seq)
 
 static inline void set_mems_allowed(nodemask_t nodemask)
 {
+       unsigned long flags;
+
        task_lock(current);
+       local_irq_save(flags);
        write_seqcount_begin(&current->mems_allowed_seq);
        current->mems_allowed = nodemask;
        write_seqcount_end(&current->mems_allowed_seq);
+       local_irq_restore(flags);
        task_unlock(current);
 }
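
The interrupt disabling added above matters because a seqcount reader spins until the write section it observes has closed. A minimal sketch of the reader side (illustrative; the real readers are the get_mems_allowed()/put_mems_allowed() helpers in this same header):

#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>

/* Illustrative reader: if this ran from an interrupt on the same CPU that
 * has the write section above open, read_seqcount_begin() would spin on the
 * odd sequence count forever -- hence the local_irq_save() on the writer. */
static nodemask_t read_mems_sketch(struct task_struct *tsk)
{
	nodemask_t mask;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&tsk->mems_allowed_seq);
		mask = tsk->mems_allowed;
	} while (read_seqcount_retry(&tsk->mems_allowed_seq, seq));

	return mask;
}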
 
index 5cd0f09499271283795bb49a7b18b8ed3f1930cd..b0ed422e4e4a09f059eef7cb2018dee374801084 100644 (file)
@@ -32,10 +32,10 @@ extern struct fs_struct init_fs;
 #endif
 
 #ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ                                                        \
-       .mems_allowed_seq = SEQCNT_ZERO,
+#define INIT_CPUSET_SEQ(tsk)                                                   \
+       .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
 #else
-#define INIT_CPUSET_SEQ
+#define INIT_CPUSET_SEQ(tsk)
 #endif
 
 #define INIT_SIGNALS(sig) {                                            \
@@ -220,7 +220,7 @@ extern struct task_group root_task_group;
        INIT_FTRACE_GRAPH                                               \
        INIT_TRACE_RECURSION                                            \
        INIT_TASK_RCU_PREEMPT(tsk)                                      \
-       INIT_CPUSET_SEQ                                                 \
+       INIT_CPUSET_SEQ(tsk)                                            \
        INIT_VTIME(tsk)                                                 \
 }
 
index cfc2f119779ab6a202082b42ba58d0fb6290f676..92b1bfc5da6087850e43015ebdaf7d3de455f522 100644 (file)
@@ -497,6 +497,10 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #define rwlock_acquire_read(l, s, t, i)                lock_acquire_shared_recursive(l, s, t, NULL, i)
 #define rwlock_release(l, n, i)                        lock_release(l, n, i)
 
+#define seqcount_acquire(l, s, t, i)           lock_acquire_exclusive(l, s, t, NULL, i)
+#define seqcount_acquire_read(l, s, t, i)      lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define seqcount_release(l, n, i)              lock_release(l, n, i)
+
 #define mutex_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
 #define mutex_acquire_nest(l, s, t, n, i)      lock_acquire_exclusive(l, s, t, n, i)
 #define mutex_release(l, n, i)                 lock_release(l, n, i)
@@ -504,11 +508,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #define rwsem_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
 #define rwsem_acquire_nest(l, s, t, n, i)      lock_acquire_exclusive(l, s, t, n, i)
 #define rwsem_acquire_read(l, s, t, i)         lock_acquire_shared(l, s, t, NULL, i)
-# define rwsem_release(l, n, i)                        lock_release(l, n, i)
+#define rwsem_release(l, n, i)                 lock_release(l, n, i)
 
 #define lock_map_acquire(l)                    lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l)               lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
-# define lock_map_release(l)                   lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l)                    lock_release(l, 1, _THIS_IP_)
 
 #ifdef CONFIG_PROVE_LOCKING
 # define might_lock(lock)                                              \
index bab49da8a0f0b1bd2e01d516fdd0e39a07162326..d3181936c138ba2583960815a722aa8f90938a9d 100644 (file)
@@ -131,7 +131,7 @@ static inline int mutex_is_locked(struct mutex *lock)
 }
 
 /*
- * See kernel/mutex.c for detailed documentation of these APIs.
+ * See kernel/locking/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/mutex-design.txt.
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
index 10d16c4fbe89e45e157e173a98607d8e315c0f42..41467f8ff8ec8c7c5766021abe00e358f63e93cc 100644 (file)
@@ -2,8 +2,8 @@
 #define _SCHED_SYSCTL_H
 
 #ifdef CONFIG_DETECT_HUNG_TASK
+extern int          sysctl_hung_task_check_count;
 extern unsigned int  sysctl_hung_task_panic;
-extern unsigned long sysctl_hung_task_check_count;
 extern unsigned long sysctl_hung_task_timeout_secs;
 extern unsigned long sysctl_hung_task_warnings;
 extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
index 21a209336e794fcf70be28f5a27b7e5f31a734ee..1e8a8b6e837d8621fa5488f832273df673ff3520 100644 (file)
@@ -34,6 +34,7 @@
 
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
+#include <linux/lockdep.h>
 #include <asm/processor.h>
 
 /*
  */
 typedef struct seqcount {
        unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       struct lockdep_map dep_map;
+#endif
 } seqcount_t;
 
-#define SEQCNT_ZERO { 0 }
-#define seqcount_init(x)       do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
+static inline void __seqcount_init(seqcount_t *s, const char *name,
+                                         struct lock_class_key *key)
+{
+       /*
+        * Make sure we are not reinitializing a held lock:
+        */
+       lockdep_init_map(&s->dep_map, name, key, 0);
+       s->sequence = 0;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+               .dep_map = { .name = #lockname } \
+
+# define seqcount_init(s)                              \
+       do {                                            \
+               static struct lock_class_key __key;     \
+               __seqcount_init((s), #s, &__key);       \
+       } while (0)
+
+static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
+{
+       seqcount_t *l = (seqcount_t *)s;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
+       seqcount_release(&l->dep_map, 1, _RET_IP_);
+       local_irq_restore(flags);
+}
+
+#else
+# define SEQCOUNT_DEP_MAP_INIT(lockname)
+# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
+# define seqcount_lockdep_reader_access(x)
+#endif
+
+#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
+
 
 /**
  * __read_seqcount_begin - begin a seq-read critical section (without barrier)
@@ -75,6 +116,22 @@ repeat:
        return ret;
 }
 
+/**
+ * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * seqcount, but without any lockdep checking. Validity of the critical
+ * section is tested by checking read_seqcount_retry function.
+ */
+static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+{
+       unsigned ret = __read_seqcount_begin(s);
+       smp_rmb();
+       return ret;
+}
+
 /**
  * read_seqcount_begin - begin a seq-read critical section
  * @s: pointer to seqcount_t
@@ -86,9 +143,8 @@ repeat:
  */
 static inline unsigned read_seqcount_begin(const seqcount_t *s)
 {
-       unsigned ret = __read_seqcount_begin(s);
-       smp_rmb();
-       return ret;
+       seqcount_lockdep_reader_access(s);
+       return read_seqcount_begin_no_lockdep(s);
 }
 
 /**
@@ -108,6 +164,8 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
        unsigned ret = ACCESS_ONCE(s->sequence);
+
+       seqcount_lockdep_reader_access(s);
        smp_rmb();
        return ret & ~1;
 }
@@ -152,14 +210,21 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
  * Sequence counter only version assumes that callers are using their
  * own mutexing.
  */
-static inline void write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
        s->sequence++;
        smp_wmb();
+       seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+}
+
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+       write_seqcount_begin_nested(s, 0);
 }
 
 static inline void write_seqcount_end(seqcount_t *s)
 {
+       seqcount_release(&s->dep_map, 1, _RET_IP_);
        smp_wmb();
        s->sequence++;
 }
@@ -188,7 +253,7 @@ typedef struct {
  */
 #define __SEQLOCK_UNLOCKED(lockname)                   \
        {                                               \
-               .seqcount = SEQCNT_ZERO,                \
+               .seqcount = SEQCNT_ZERO(lockname),      \
                .lock = __SPIN_LOCK_UNLOCKED(lockname)  \
        }
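
A minimal usage sketch of the lockdep-aware seqcount API added above; the structure and field names here are hypothetical, not taken from the patch:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>

struct foo {
	spinlock_t lock;		/* writers' mutual exclusion */
	seqcount_t seq;			/* gets a lockdep class via seqcount_init() */
	u64 a, b;
};

static void foo_init(struct foo *f)
{
	spin_lock_init(&f->lock);
	seqcount_init(&f->seq);		/* registers the lockdep map */
}

static void foo_write(struct foo *f, u64 a, u64 b)
{
	spin_lock(&f->lock);
	write_seqcount_begin(&f->seq);	/* lockdep acquire, subclass 0 */
	f->a = a;
	f->b = b;
	write_seqcount_end(&f->seq);	/* lockdep release */
	spin_unlock(&f->lock);
}

static u64 foo_read_sum(struct foo *f)
{
	unsigned int seq;
	u64 sum;

	do {
		seq = read_seqcount_begin(&f->seq);	/* records a reader for lockdep */
		sum = f->a + f->b;
	} while (read_seqcount_retry(&f->seq, seq));

	return sum;
}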
 
index 8da8c4e87da3018323177000d46a5e42b19bf23f..7bfabd20204c150afa40a7249932519c0e770267 100644 (file)
@@ -67,6 +67,13 @@ struct u64_stats_sync {
 #endif
 };
 
+
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+# define u64_stats_init(syncp) seqcount_init(syncp.seq)
+#else
+# define u64_stats_init(syncp) do { } while (0)
+#endif
+
 static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
index 613381bcde40a8c68a0917cd19d36d6a603c9e2a..04c308413a5dd3b2295b33c8c78bf56a11fa6b20 100644 (file)
@@ -424,6 +424,25 @@ TRACE_EVENT(sched_pi_setprio,
                        __entry->oldprio, __entry->newprio)
 );
 
+#ifdef CONFIG_DETECT_HUNG_TASK
+TRACE_EVENT(sched_process_hang,
+       TP_PROTO(struct task_struct *tsk),
+       TP_ARGS(tsk),
+
+       TP_STRUCT__entry(
+               __array( char,  comm,   TASK_COMM_LEN   )
+               __field( pid_t, pid                     )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+               __entry->pid = tsk->pid;
+       ),
+
+       TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
+);
+#endif /* CONFIG_DETECT_HUNG_TASK */
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
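
Like any other scheduler tracepoint, the new event can presumably be enabled from tracefs (e.g. /sys/kernel/debug/tracing/events/sched/sched_process_hang/enable) on kernels built with CONFIG_DETECT_HUNG_TASK; the kernel/hung_task.c hunk further down shows it being emitted from check_hung_task().
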
index a4d1aa8da9bc7180ad279a709e4d22cc5627543d..09a9c94f42bde841a58b875ca7fc75c1b69f65c6 100644 (file)
@@ -7,22 +7,19 @@ obj-y     = fork.o exec_domain.o panic.o \
            sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
            signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
            extable.o params.o posix-timers.o \
-           kthread.o sys_ni.o posix-cpu-timers.o mutex.o \
-           hrtimer.o rwsem.o nsproxy.o semaphore.o \
+           kthread.o sys_ni.o posix-cpu-timers.o \
+           hrtimer.o nsproxy.o \
            notifier.o ksysfs.o cred.o reboot.o \
-           async.o range.o groups.o lglock.o smpboot.o
+           async.o range.o groups.o smpboot.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
-CFLAGS_REMOVE_lockdep.o = -pg
-CFLAGS_REMOVE_lockdep_proc.o = -pg
-CFLAGS_REMOVE_mutex-debug.o = -pg
-CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_irq_work.o = -pg
 endif
 
 obj-y += sched/
+obj-y += locking/
 obj-y += power/
 obj-y += printk/
 obj-y += cpu/
@@ -34,26 +31,15 @@ obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
-obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
-obj-$(CONFIG_LOCKDEP) += lockdep.o
-ifeq ($(CONFIG_PROC_FS),y)
-obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-endif
 obj-$(CONFIG_FUTEX) += futex.o
 ifeq ($(CONFIG_COMPAT),y)
 obj-$(CONFIG_FUTEX) += futex_compat.o
 endif
-obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
-obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
-obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += smp.o
 ifneq ($(CONFIG_SMP),y)
 obj-y += up.o
 endif
-obj-$(CONFIG_SMP) += spinlock.o
-obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
-obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_MODULE_SIG) += module_signing.o modsign_pubkey.o modsign_certificate.o
index c3a1a55a52141851630b91f2bff2aa789b508ace..80ba086f021d3022afcf3f06ecdb1d920cdc8f7c 100644 (file)
@@ -66,7 +66,7 @@
 
 #include <asm/futex.h>
 
-#include "rtmutex_common.h"
+#include "locking/rtmutex_common.h"
 
 int __read_mostly futex_cmpxchg_enabled;
 
index 3e97fb126e6b9255a852887923f39230cd52c513..8807061ca004cc0a532460244e01a56391eb0469 100644 (file)
 #include <linux/export.h>
 #include <linux/sysctl.h>
 #include <linux/utsname.h>
+#include <trace/events/sched.h>
 
 /*
  * The number of tasks checked:
  */
-unsigned long __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
+int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
 
 /*
  * Limit number of tasks checked in a batch.
@@ -92,6 +93,9 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
                t->last_switch_count = switch_count;
                return;
        }
+
+       trace_sched_process_hang(t);
+
        if (!sysctl_hung_task_warnings)
                return;
        sysctl_hung_task_warnings--;
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
new file mode 100644 (file)
index 0000000..baab8e5
--- /dev/null
@@ -0,0 +1,25 @@
+
+obj-y += mutex.o semaphore.o rwsem.o lglock.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_lockdep.o = -pg
+CFLAGS_REMOVE_lockdep_proc.o = -pg
+CFLAGS_REMOVE_mutex-debug.o = -pg
+CFLAGS_REMOVE_rtmutex-debug.o = -pg
+endif
+
+obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+obj-$(CONFIG_LOCKDEP) += lockdep.o
+ifeq ($(CONFIG_PROC_FS),y)
+obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+endif
+obj-$(CONFIG_SMP) += spinlock.o
+obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
+obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
similarity index 100%
rename from kernel/lglock.c
rename to kernel/locking/lglock.c
similarity index 99%
rename from kernel/lockdep.c
rename to kernel/locking/lockdep.c
index 4e8e14c34e428d6a75580ec862c4975b0425dfc0..576ba756a32d9c80948c72e31700738bcfc5ee06 100644 (file)
@@ -1232,7 +1232,7 @@ static int noop_count(struct lock_list *entry, void *data)
        return 0;
 }
 
-unsigned long __lockdep_count_forward_deps(struct lock_list *this)
+static unsigned long __lockdep_count_forward_deps(struct lock_list *this)
 {
        unsigned long  count = 0;
        struct lock_list *uninitialized_var(target_entry);
@@ -1258,7 +1258,7 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
        return ret;
 }
 
-unsigned long __lockdep_count_backward_deps(struct lock_list *this)
+static unsigned long __lockdep_count_backward_deps(struct lock_list *this)
 {
        unsigned long  count = 0;
        struct lock_list *uninitialized_var(target_entry);
similarity index 97%
rename from kernel/lockdep_proc.c
rename to kernel/locking/lockdep_proc.c
index b2c71c5873e441ae1d9a89d8caa7517eb5bcf976..ef43ac4bafb59b83ab979a680d49d6077749f955 100644 (file)
@@ -421,6 +421,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
        seq_time(m, lt->min);
        seq_time(m, lt->max);
        seq_time(m, lt->total);
+       seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
 }
 
 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
@@ -518,20 +519,20 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
        }
        if (i) {
                seq_puts(m, "\n");
-               seq_line(m, '.', 0, 40 + 1 + 10 * (14 + 1));
+               seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
                seq_puts(m, "\n");
        }
 }
 
 static void seq_header(struct seq_file *m)
 {
-       seq_printf(m, "lock_stat version 0.3\n");
+       seq_puts(m, "lock_stat version 0.4\n");
 
        if (unlikely(!debug_locks))
                seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
 
-       seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
-       seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
+       seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
+       seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
                        "%14s %14s\n",
                        "class name",
                        "con-bounces",
@@ -539,12 +540,14 @@ static void seq_header(struct seq_file *m)
                        "waittime-min",
                        "waittime-max",
                        "waittime-total",
+                       "waittime-avg",
                        "acq-bounces",
                        "acquisitions",
                        "holdtime-min",
                        "holdtime-max",
-                       "holdtime-total");
-       seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
+                       "holdtime-total",
+                       "holdtime-avg");
+       seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
        seq_printf(m, "\n");
 }
 
similarity index 99%
rename from kernel/mutex.c
rename to kernel/locking/mutex.c
index d24105b1b794e635e93509c04e0ac6edb85cc7f5..4dd6e4c219de9316593b61daae8e17cf8dc5d874 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * kernel/mutex.c
+ * kernel/locking/mutex.c
  *
  * Mutexes: blocking mutual exclusion locks
  *
similarity index 100%
rename from kernel/mutex.h
rename to kernel/locking/mutex.h
similarity index 100%
rename from kernel/rtmutex.c
rename to kernel/locking/rtmutex.c
similarity index 100%
rename from kernel/rtmutex.h
rename to kernel/locking/rtmutex.h
similarity index 100%
rename from lib/rwsem.c
rename to kernel/locking/rwsem-xadd.c
similarity index 100%
rename from kernel/rwsem.c
rename to kernel/locking/rwsem.c
similarity index 100%
rename from kernel/spinlock.c
rename to kernel/locking/spinlock.c
index 3822ac0c4b2732dfd39982b74138a1960fa47a21..6abb03dff5c053f44ef5dbc28f7bf754e5669e78 100644 (file)
@@ -1133,7 +1133,7 @@ void exit_rcu(void)
 
 #ifdef CONFIG_RCU_BOOST
 
-#include "../rtmutex_common.h"
+#include "../locking/rtmutex_common.h"
 
 #ifdef CONFIG_RCU_TRACE
 
index d37d9dd8f4635069bf65bd2a10b00e1595ae7b64..34a604726d0b7c87b4b112a3f4ab9e1a6902ffcb 100644 (file)
@@ -969,9 +969,10 @@ static struct ctl_table kern_table[] = {
        {
                .procname       = "hung_task_check_count",
                .data           = &sysctl_hung_task_check_count,
-               .maxlen         = sizeof(unsigned long),
+               .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_doulongvec_minmax,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
        },
        {
                .procname       = "hung_task_timeout_secs",
index bb016e116ba43286ed28ca7bf2667c081d752e81..d480a8c9238562b144e59217cdf4ecc05b574556 100644 (file)
@@ -42,10 +42,6 @@ obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
 obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
-obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
-lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
-lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
 
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
index 28fe26b64f8a746119d8111ad0f3beb9ef4751c2..d8d9fe3f685c00c7e7f1430ca924850d89713b07 100644 (file)
@@ -26,7 +26,7 @@
  * of ZERO_PAGE(), such as /dev/zero
  */
 static DEFINE_MUTEX(xip_sparse_mutex);
-static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
+static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq);
 static struct page *__xip_sparse_page;
 
 /* called under xip_sparse_mutex */
index 8db1b985dbf14faffe2ee246de8bf0de17d8610c..762896ebfcf505348a659c83d8720b48e8b58c86 100644 (file)
@@ -539,7 +539,7 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-       int subclass = 0;
+       int subclass = 0, i;
 
        netif_carrier_off(dev);
 
@@ -593,6 +593,13 @@ static int vlan_dev_init(struct net_device *dev)
        if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct vlan_pcpu_stats *vlan_stat;
+               vlan_stat = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
+               u64_stats_init(&vlan_stat->syncp);
+       }
+
+
        return 0;
 }
 
index e6b7fecb3af185e452e99806d5c7962b5fad8f26..f00cfd2a0143e0e8868ca808cb6a236bb1124e3c 100644 (file)
@@ -88,11 +88,18 @@ out:
 static int br_dev_init(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
+       int i;
 
        br->stats = alloc_percpu(struct br_cpu_netstats);
        if (!br->stats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct br_cpu_netstats *br_dev_stats;
+               br_dev_stats = per_cpu_ptr(br->stats, i);
+               u64_stats_init(&br_dev_stats->syncp);
+       }
+
        return 0;
 }
 
index 68af9aac91d04543bdbbbd55997ae901491543fd..70011e029ac13718d72d7172776ac8cb76798ced 100644 (file)
@@ -1503,6 +1503,7 @@ int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
        ptr[0] = __alloc_percpu(mibsize, align);
        if (!ptr[0])
                return -ENOMEM;
+
 #if SNMP_ARRAY_SZ == 2
        ptr[1] = __alloc_percpu(mibsize, align);
        if (!ptr[1]) {
@@ -1547,6 +1548,8 @@ static const struct net_protocol icmp_protocol = {
 
 static __net_init int ipv4_mib_init_net(struct net *net)
 {
+       int i;
+
        if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
                          sizeof(struct tcp_mib),
                          __alignof__(struct tcp_mib)) < 0)
@@ -1555,6 +1558,17 @@ static __net_init int ipv4_mib_init_net(struct net *net)
                          sizeof(struct ipstats_mib),
                          __alignof__(struct ipstats_mib)) < 0)
                goto err_ip_mib;
+
+       for_each_possible_cpu(i) {
+               struct ipstats_mib *af_inet_stats;
+               af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[0], i);
+               u64_stats_init(&af_inet_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+               af_inet_stats = per_cpu_ptr(net->mib.ip_statistics[1], i);
+               u64_stats_init(&af_inet_stats->syncp);
+#endif
+       }
+
        if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
                          sizeof(struct linux_mib),
                          __alignof__(struct linux_mib)) < 0)
index 63a6d6d6b87581d3ac3bda52cab5833f3cb169ab..caf01176a5e49774555b9c4dea5809e4f57cce19 100644 (file)
@@ -976,13 +976,19 @@ int ip_tunnel_init(struct net_device *dev)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;
-       int err;
+       int i, err;
 
        dev->destructor = ip_tunnel_dev_free;
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct pcpu_tstats *ipt_stats;
+               ipt_stats = per_cpu_ptr(dev->tstats, i);
+               u64_stats_init(&ipt_stats->syncp);
+       }
+
        err = gro_cells_init(&tunnel->gro_cells, dev);
        if (err) {
                free_percpu(dev->tstats);
index 542d09561ed6df588c87946e50ba458ea20c9cf1..5658d9d51637beaa07790b49e4d405cc290afe16 100644 (file)
@@ -271,10 +271,24 @@ static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp,
 
 static int snmp6_alloc_dev(struct inet6_dev *idev)
 {
+       int i;
+
        if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
                          sizeof(struct ipstats_mib),
                          __alignof__(struct ipstats_mib)) < 0)
                goto err_ip;
+
+       for_each_possible_cpu(i) {
+               struct ipstats_mib *addrconf_stats;
+               addrconf_stats = per_cpu_ptr(idev->stats.ipv6[0], i);
+               u64_stats_init(&addrconf_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+               addrconf_stats = per_cpu_ptr(idev->stats.ipv6[1], i);
+               u64_stats_init(&addrconf_stats->syncp);
+#endif
+       }
+
+
        idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
                                        GFP_KERNEL);
        if (!idev->stats.icmpv6dev)
index 6468bda1f2b94c382fe7212135ed1245f71e221f..ff75313f27a848db69dc0af6cf1a367205e13d2d 100644 (file)
@@ -714,6 +714,8 @@ static void ipv6_packet_cleanup(void)
 
 static int __net_init ipv6_init_mibs(struct net *net)
 {
+       int i;
+
        if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
                          sizeof(struct udp_mib),
                          __alignof__(struct udp_mib)) < 0)
@@ -726,6 +728,18 @@ static int __net_init ipv6_init_mibs(struct net *net)
                          sizeof(struct ipstats_mib),
                          __alignof__(struct ipstats_mib)) < 0)
                goto err_ip_mib;
+
+       for_each_possible_cpu(i) {
+               struct ipstats_mib *af_inet6_stats;
+               af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[0], i);
+               u64_stats_init(&af_inet6_stats->syncp);
+#if SNMP_ARRAY_SZ == 2
+               af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics[1], i);
+               u64_stats_init(&af_inet6_stats->syncp);
+#endif
+       }
+
+
        if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
                          sizeof(struct icmpv6_mib),
                          __alignof__(struct icmpv6_mib)) < 0)
index bf4a9a084de5aa8f733318276d6e84cc37d5e249..8acb28621f9cfd57071eeff1a39acd0de1ff6c02 100644 (file)
@@ -1252,6 +1252,7 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
 static int ip6gre_tunnel_init(struct net_device *dev)
 {
        struct ip6_tnl *tunnel;
+       int i;
 
        tunnel = netdev_priv(dev);
 
@@ -1269,6 +1270,13 @@ static int ip6gre_tunnel_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct pcpu_tstats *ip6gre_tunnel_stats;
+               ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i);
+               u64_stats_init(&ip6gre_tunnel_stats->syncp);
+       }
+
+
        return 0;
 }
 
@@ -1449,6 +1457,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
 static int ip6gre_tap_init(struct net_device *dev)
 {
        struct ip6_tnl *tunnel;
+       int i;
 
        tunnel = netdev_priv(dev);
 
@@ -1462,6 +1471,12 @@ static int ip6gre_tap_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct pcpu_tstats *ip6gre_tap_stats;
+               ip6gre_tap_stats = per_cpu_ptr(dev->tstats, i);
+               u64_stats_init(&ip6gre_tap_stats->syncp);
+       }
+
        return 0;
 }
 
index 5e31a909a2b0c215df8266a76f28d2d0218e9790..59df872e2f4d62f68b583ff40218d68c5d5189df 100644 (file)
@@ -910,7 +910,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
 
 out_err_release:
        if (err == -ENETUNREACH)
-               IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
+               IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
        dst_release(*dst);
        *dst = NULL;
        return err;
index 583b77e2f69be1d1499da479e2f8da1435d22a97..df1fa58528c6fdeae476193f3bf7ae53d2e38a7e 100644 (file)
@@ -1494,12 +1494,19 @@ static inline int
 ip6_tnl_dev_init_gen(struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
+       int i;
 
        t->dev = dev;
        t->net = dev_net(dev);
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;
+
+       for_each_possible_cpu(i) {
+               struct pcpu_tstats *ip6_tnl_stats;
+               ip6_tnl_stats = per_cpu_ptr(dev->tstats, i);
+               u64_stats_init(&ip6_tnl_stats->syncp);
+       }
        return 0;
 }
 
index 3a9038dd818d7588c950c4bca2ff56279683314c..bfc6fcea38410e3a5817ad7ba254a98151bb0749 100644 (file)
@@ -1320,6 +1320,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
 static int ipip6_tunnel_init(struct net_device *dev)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
+       int i;
 
        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
@@ -1332,6 +1333,12 @@ static int ipip6_tunnel_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct pcpu_tstats *ipip6_tunnel_stats;
+               ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i);
+               u64_stats_init(&ipip6_tunnel_stats->syncp);
+       }
+
        return 0;
 }
 
@@ -1341,6 +1348,7 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
        struct iphdr *iph = &tunnel->parms.iph;
        struct net *net = dev_net(dev);
        struct sit_net *sitn = net_generic(net, sit_net_id);
+       int i;
 
        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
@@ -1354,6 +1362,13 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
        dev->tstats = alloc_percpu(struct pcpu_tstats);
        if (!dev->tstats)
                return -ENOMEM;
+
+       for_each_possible_cpu(i) {
+               struct pcpu_tstats *ipip6_fb_stats;
+               ipip6_fb_stats = per_cpu_ptr(dev->tstats, i);
+               u64_stats_init(&ipip6_fb_stats->syncp);
+       }
+
        dev_hold(dev);
        rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
        return 0;
index 62786a495cea481a3fd18d063b7174cdb36052ad..1ded5c6d268c662af2e4743fedfb8226b5715cc3 100644 (file)
@@ -842,7 +842,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
               struct ip_vs_dest **dest_p)
 {
        struct ip_vs_dest *dest;
-       unsigned int atype;
+       unsigned int atype, i;
 
        EnterFunction(2);
 
@@ -869,6 +869,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
        if (!dest->stats.cpustats)
                goto err_alloc;
 
+       for_each_possible_cpu(i) {
+               struct ip_vs_cpu_stats *ip_vs_dest_stats;
+               ip_vs_dest_stats = per_cpu_ptr(dest->stats.cpustats, i);
+               u64_stats_init(&ip_vs_dest_stats->syncp);
+       }
+
        dest->af = svc->af;
        dest->protocol = svc->protocol;
        dest->vaddr = svc->addr;
@@ -1134,7 +1140,7 @@ static int
 ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
                  struct ip_vs_service **svc_p)
 {
-       int ret = 0;
+       int ret = 0, i;
        struct ip_vs_scheduler *sched = NULL;
        struct ip_vs_pe *pe = NULL;
        struct ip_vs_service *svc = NULL;
@@ -1184,6 +1190,13 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
                goto out_err;
        }
 
+       for_each_possible_cpu(i) {
+               struct ip_vs_cpu_stats *ip_vs_stats;
+               ip_vs_stats = per_cpu_ptr(svc->stats.cpustats, i);
+               u64_stats_init(&ip_vs_stats->syncp);
+       }
+
+
        /* I'm the first user of the service */
        atomic_set(&svc->refcnt, 0);
 
@@ -3780,7 +3793,7 @@ static struct notifier_block ip_vs_dst_notifier = {
 
 int __net_init ip_vs_control_net_init(struct net *net)
 {
-       int idx;
+       int i, idx;
        struct netns_ipvs *ipvs = net_ipvs(net);
 
        /* Initialize rs_table */
@@ -3799,6 +3812,12 @@ int __net_init ip_vs_control_net_init(struct net *net)
        if (!ipvs->tot_stats.cpustats)
                return -ENOMEM;
 
+       for_each_possible_cpu(i) {
+               struct ip_vs_cpu_stats *ipvs_tot_stats;
+               ipvs_tot_stats = per_cpu_ptr(ipvs->tot_stats.cpustats, i);
+               u64_stats_init(&ipvs_tot_stats->syncp);
+       }
+
        spin_lock_init(&ipvs->tot_stats.lock);
 
        proc_create("ip_vs", 0, net->proc_net, &ip_vs_info_fops);
index 1408adc2a2a7d2c47f4e32f71863bbd3a04cfdf2..449e0776a2c0887bea75fbc963224a37d20ccb33 100644 (file)
@@ -1199,6 +1199,12 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
                goto err_destroy_table;
        }
 
+       for_each_possible_cpu(i) {
+               struct dp_stats_percpu *dpath_stats;
+               dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
+               u64_stats_init(&dpath_stats->sync);
+       }
+
        dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dp->ports) {
index 6f65dbe13812b15208e71745f4744db76e22c532..d830a95f03a4ba65ba755a3ff8dbf638700c9c68 100644 (file)
@@ -118,6 +118,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 {
        struct vport *vport;
        size_t alloc_size;
+       int i;
 
        alloc_size = sizeof(struct vport);
        if (priv_size) {
@@ -141,6 +142,13 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
                return ERR_PTR(-ENOMEM);
        }
 
+       for_each_possible_cpu(i) {
+               struct pcpu_tstats *vport_stats;
+               vport_stats = per_cpu_ptr(vport->percpu_stats, i);
+               u64_stats_init(&vport_stats->syncp);
+       }
+
+
        spin_lock_init(&vport->stats_lock);
 
        return vport;