git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'akpm' (patches from Andrew)
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 12 Oct 2016 00:34:10 +0000 (17:34 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 12 Oct 2016 00:34:10 +0000 (17:34 -0700)
Merge more updates from Andrew Morton:

 - a few block updates that fell in my lap

 - lib/ updates

 - checkpatch

 - autofs

 - ipc

 - a ton of misc other things

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (100 commits)
  mm: split gfp_mask and mapping flags into separate fields
  fs: use mapping_set_error instead of opencoded set_bit
  treewide: remove redundant #include <linux/kconfig.h>
  hung_task: allow hung_task_panic when hung_task_warnings is 0
  kthread: add kerneldoc for kthread_create()
  kthread: better support freezable kthread workers
  kthread: allow to modify delayed kthread work
  kthread: allow to cancel kthread work
  kthread: initial support for delayed kthread work
  kthread: detect when a kthread work is used by more workers
  kthread: add kthread_destroy_worker()
  kthread: add kthread_create_worker*()
  kthread: allow to call __kthread_create_on_node() with va_list args
  kthread/smpboot: do not park in kthread_create_on_cpu()
  kthread: kthread worker API cleanup
  kthread: rename probe_kthread_data() to kthread_probe_data()
  scripts/tags.sh: enable code completion in VIM
  mm: kmemleak: avoid using __va() on addresses that don't have a lowmem mapping
  kdump, vmcoreinfo: report memory sections virtual addresses
  ipc/sem.c: add cond_resched in exit_sem
  ...

200 files changed:
Documentation/DMA-attributes.txt
Documentation/RCU/lockdep-splat.txt
Documentation/dev-tools/kmemleak.rst
Documentation/filesystems/autofs4-mount-control.txt
Documentation/filesystems/autofs4.txt
Documentation/kernel-parameters.txt
arch/arm/include/asm/trusted_foundations.h
arch/arm/kernel/process.c
arch/arm64/include/asm/alternative.h
arch/arm64/kernel/process.c
arch/mips/cavium-octeon/setup.c
arch/mips/include/asm/kexec.h
arch/mips/include/asm/mach-loongson64/loongson.h
arch/mips/kernel/crash.c
arch/mips/kernel/machine_kexec.c
arch/mips/math-emu/cp1emu.c
arch/mips/net/bpf_jit.c
arch/powerpc/kernel/iommu.c
arch/tile/mm/mmap.c
arch/unicore32/kernel/process.c
arch/x86/include/asm/kexec.h
arch/x86/include/asm/smp.h
arch/x86/kernel/crash.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/process.c
arch/x86/kernel/smp.c
arch/x86/kernel/sys_x86_64.c
arch/x86/kvm/i8254.c
block/blk-lib.c
block/ioctl.c
crypto/crypto_engine.c
drivers/block/loop.c
drivers/char/random.c
drivers/char/virtio_console.c
drivers/infiniband/sw/rdmavt/cq.c
drivers/input/rmi4/rmi_bus.c
drivers/input/rmi4/rmi_driver.c
drivers/input/rmi4/rmi_f01.c
drivers/input/rmi4/rmi_f11.c
drivers/irqchip/irq-bcm6345-l1.c
drivers/irqchip/irq-bcm7038-l1.c
drivers/irqchip/irq-bcm7120-l2.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/md/dm-rq.c
drivers/md/dm.c
drivers/media/dvb-frontends/af9013.h
drivers/media/dvb-frontends/af9033.h
drivers/media/dvb-frontends/ascot2e.h
drivers/media/dvb-frontends/atbm8830.h
drivers/media/dvb-frontends/au8522.h
drivers/media/dvb-frontends/cx22702.h
drivers/media/dvb-frontends/cx24113.h
drivers/media/dvb-frontends/cx24116.h
drivers/media/dvb-frontends/cx24117.h
drivers/media/dvb-frontends/cx24120.h
drivers/media/dvb-frontends/cx24123.h
drivers/media/dvb-frontends/cxd2820r.h
drivers/media/dvb-frontends/cxd2841er.h
drivers/media/dvb-frontends/dib3000mc.h
drivers/media/dvb-frontends/dib7000m.h
drivers/media/dvb-frontends/dib7000p.h
drivers/media/dvb-frontends/drxd.h
drivers/media/dvb-frontends/drxk.h
drivers/media/dvb-frontends/ds3000.h
drivers/media/dvb-frontends/dvb_dummy_fe.h
drivers/media/dvb-frontends/ec100.h
drivers/media/dvb-frontends/hd29l2.h
drivers/media/dvb-frontends/helene.h
drivers/media/dvb-frontends/horus3a.h
drivers/media/dvb-frontends/ix2505v.h
drivers/media/dvb-frontends/lg2160.h
drivers/media/dvb-frontends/lgdt3305.h
drivers/media/dvb-frontends/lgs8gl5.h
drivers/media/dvb-frontends/lgs8gxx.h
drivers/media/dvb-frontends/lnbh24.h
drivers/media/dvb-frontends/lnbh25.h
drivers/media/dvb-frontends/lnbp21.h
drivers/media/dvb-frontends/lnbp22.h
drivers/media/dvb-frontends/m88rs2000.h
drivers/media/dvb-frontends/mb86a20s.h
drivers/media/dvb-frontends/s5h1409.h
drivers/media/dvb-frontends/s5h1411.h
drivers/media/dvb-frontends/s5h1432.h
drivers/media/dvb-frontends/s921.h
drivers/media/dvb-frontends/si21xx.h
drivers/media/dvb-frontends/sp2.h
drivers/media/dvb-frontends/stb6000.h
drivers/media/dvb-frontends/stv0288.h
drivers/media/dvb-frontends/stv0367.h
drivers/media/dvb-frontends/stv0900.h
drivers/media/dvb-frontends/stv6110.h
drivers/media/dvb-frontends/tda10048.h
drivers/media/dvb-frontends/tda18271c2dd.h
drivers/media/dvb-frontends/ts2020.h
drivers/media/dvb-frontends/zl10036.h
drivers/media/dvb-frontends/zl10039.h
drivers/media/pci/cx23885/altera-ci.h
drivers/media/pci/ivtv/ivtv-driver.c
drivers/media/pci/ivtv/ivtv-irq.c
drivers/media/tuners/fc0011.h
drivers/media/tuners/fc0012.h
drivers/media/tuners/fc0013.h
drivers/media/tuners/max2165.h
drivers/media/tuners/mc44s803.h
drivers/media/tuners/mxl5005s.h
drivers/media/tuners/r820t.h
drivers/media/tuners/si2157.h
drivers/media/tuners/tda18212.h
drivers/media/tuners/tda18218.h
drivers/media/tuners/xc5000.h
drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
drivers/media/usb/dvb-usb/dibusb-common.c
drivers/media/usb/hdpvr/hdpvr-video.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdpart.c
drivers/net/dsa/b53/b53_mmap.c
drivers/net/ethernet/microchip/encx24j600.c
drivers/net/ethernet/sun/ldmvsw.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/nvme/host/pci.c
drivers/pps/Kconfig
drivers/rapidio/rio_cm.c
drivers/spi/spi.c
drivers/staging/lustre/lustre/llite/vvp_page.c
drivers/tty/serial/sc16is7xx.c
drivers/usb/early/ehci-dbgp.c
drivers/usb/gadget/udc/bcm63xx_udc.c
drivers/usb/host/pci-quirks.c
fs/afs/write.c
fs/autofs4/autofs_i.h
fs/autofs4/dev-ioctl.c
fs/autofs4/inode.c
fs/autofs4/root.c
fs/block_dev.c
fs/buffer.c
fs/exofs/inode.c
fs/ext4/page-io.c
fs/f2fs/data.c
fs/jbd2/commit.c
fs/lockd/procfs.h
fs/ocfs2/dlm/dlmmaster.c
fs/open.c
fs/pipe.c
fs/select.c
include/linux/auto_dev-ioctl.h
include/linux/auto_fs.h
include/linux/ctype.h
include/linux/dma-mapping.h
include/linux/export.h
include/linux/fs.h
include/linux/gpio/driver.h
include/linux/kexec.h
include/linux/kmemleak.h
include/linux/kthread.h
include/linux/pagemap.h
include/linux/radix-tree.h
include/linux/random.h
include/linux/relay.h
include/linux/sem.h
include/uapi/linux/auto_dev-ioctl.h [new file with mode: 0644]
include/uapi/linux/auto_fs.h
init/Kconfig
ipc/msg.c
ipc/sem.c
kernel/configs/android-base.config
kernel/configs/android-recommended.config
kernel/hung_task.c
kernel/kprobes.c
kernel/kthread.c
kernel/panic.c
kernel/ptrace.c
kernel/relay.c
kernel/smpboot.c
kernel/workqueue.c
lib/Makefile
lib/bitmap.c
lib/kstrtox.c
lib/strncpy_from_user.c
mm/bootmem.c
mm/cma.c
mm/kmemleak.c
mm/memblock.c
mm/nobootmem.c
net/batman-adv/debugfs.h
scripts/checkpatch.pl
scripts/const_structs.checkpatch [new file with mode: 0644]
scripts/tags.sh
sound/soc/intel/baytrail/sst-baytrail-ipc.c
sound/soc/intel/common/sst-acpi.h
sound/soc/intel/common/sst-ipc.c
sound/soc/intel/haswell/sst-haswell-ipc.c
sound/soc/intel/skylake/skl-sst-ipc.c
tools/testing/nvdimm/config_check.c
tools/testing/radix-tree/Makefile
tools/testing/radix-tree/iteration_check.c [new file with mode: 0644]
tools/testing/radix-tree/main.c
tools/testing/radix-tree/regression1.c
tools/testing/radix-tree/test.h

diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 2d455a5cf6718639062e237ff859fc045d7f4911..98bf7ac29aad8fff65e88bc6b1e6c2a6fc3a5033 100644
@@ -126,3 +126,20 @@ means that we won't try quite as hard to get them.
 
 NOTE: At the moment DMA_ATTR_ALLOC_SINGLE_PAGES is only implemented on ARM,
 though ARM64 patches will likely be posted soon.
+
+DMA_ATTR_NO_WARN
+----------------
+
+This tells the DMA-mapping subsystem to suppress allocation failure reports
+(similarly to __GFP_NOWARN).
+
+On some architectures allocation failures are reported with error messages
+to the system logs.  Although this can help to identify and debug problems,
+drivers that handle these failures themselves (e.g. by retrying later) gain
+nothing from the messages, and depending on the retry mechanism the messages
+can even flood the system logs while indicating no real problem.
+
+So, this provides a way for drivers to avoid those error messages on calls
+where allocation failures are not a problem, and shouldn't bother the logs.
+
+NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
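A minimal illustration of the attribute described above (not part of the
patch; the helper name my_map_for_io and its retry policy are hypothetical).
A driver with its own fallback path passes DMA_ATTR_NO_WARN through the
existing attrs argument of the DMA API:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Hypothetical driver fragment: the caller retries the mapping later,
     * so an allocation-failure report would only be log noise. */
    static int my_map_for_io(struct device *dev, struct scatterlist *sg,
                             int nents)
    {
            int mapped = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE,
                                          DMA_ATTR_NO_WARN);

            if (!mapped)
                    return -EAGAIN; /* fail quietly; our own retry path runs */
            return mapped;
    }

As the NOTE above says, only the PowerPC IOMMU code honors the attribute at
this point; elsewhere it is simply ignored.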
diff --git a/Documentation/RCU/lockdep-splat.txt b/Documentation/RCU/lockdep-splat.txt
index bf9061142827fca8d7f19c6cb013f322b47bc4e2..238e9f61352f6187670675cb2457af282cc03af6 100644
@@ -57,7 +57,7 @@ Call Trace:
  [<ffffffff817db154>] kernel_thread_helper+0x4/0x10
  [<ffffffff81066430>] ? finish_task_switch+0x80/0x110
  [<ffffffff817d9c04>] ? retint_restore_args+0xe/0xe
- [<ffffffff81097510>] ? __init_kthread_worker+0x70/0x70
+ [<ffffffff81097510>] ? __kthread_init_worker+0x70/0x70
  [<ffffffff817db150>] ? gs_change+0xb/0xb
 
 Line 2776 of block/cfq-iosched.c in v3.0-rc5 is as follows:
diff --git a/Documentation/dev-tools/kmemleak.rst b/Documentation/dev-tools/kmemleak.rst
index 1788722d549503c3164e43cdbe9b26e091d2f919..b2391b8291691b9ac6752a450a1222898c1b29a1 100644
@@ -162,6 +162,15 @@ See the include/linux/kmemleak.h header for the functions prototype.
 - ``kmemleak_alloc_recursive`` - as kmemleak_alloc but checks the recursiveness
 - ``kmemleak_free_recursive``   - as kmemleak_free but checks the recursiveness
 
+The following functions take a physical address as the object pointer
+and only perform the corresponding action if the address has a lowmem
+mapping:
+
+- ``kmemleak_alloc_phys``
+- ``kmemleak_free_part_phys``
+- ``kmemleak_not_leak_phys``
+- ``kmemleak_ignore_phys``
+
 Dealing with false positives/negatives
 --------------------------------------
 
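A short sketch of how the _phys variants above are intended to be used (the
function name reserve_my_table is hypothetical, not from the patch): early
code that only holds a physical address from memblock can pass it to kmemleak
directly, and the helper quietly does nothing when the address has no lowmem
mapping.

    #include <linux/kmemleak.h>
    #include <linux/memblock.h>

    static void __init reserve_my_table(phys_addr_t size)
    {
            /* memblock_alloc() returns a physical address in this era */
            phys_addr_t table = memblock_alloc(size, PAGE_SIZE);

            /* translated with __va() internally; a no-op without a
             * lowmem mapping, which is exactly what this commit fixes */
            kmemleak_alloc_phys(table, size, 0, 0);
    }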
diff --git a/Documentation/filesystems/autofs4-mount-control.txt b/Documentation/filesystems/autofs4-mount-control.txt
index aff22113a9866384279600728f6d3a2c92558248..50a3e01a36f80c14b85cb6c1307531a00065a733 100644
@@ -179,8 +179,19 @@ struct autofs_dev_ioctl {
                                 * including this struct */
        __s32 ioctlfd;          /* automount command fd */
 
-       __u32 arg1;             /* Command parameters */
-       __u32 arg2;
+       union {
+               struct args_protover            protover;
+               struct args_protosubver         protosubver;
+               struct args_openmount           openmount;
+               struct args_ready               ready;
+               struct args_fail                fail;
+               struct args_setpipefd           setpipefd;
+               struct args_timeout             timeout;
+               struct args_requester           requester;
+               struct args_expire              expire;
+               struct args_askumount           askumount;
+               struct args_ismountpoint        ismountpoint;
+       };
 
        char path[0];
 };
@@ -192,8 +203,8 @@ optionally be used to check a specific mount corresponding to a given
 mount point file descriptor, and when requesting the uid and gid of the
 last successful mount on a directory within the autofs file system.
 
-The fields arg1 and arg2 are used to communicate parameters and results of
-calls made as described below.
+The union is used to communicate parameters and results of calls made
+as described below.
 
 The path field is used to pass a path where it is needed and the size field
 is used account for the increased structure length when translating the
@@ -245,9 +256,9 @@ AUTOFS_DEV_IOCTL_PROTOVER_CMD and AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD
 Get the major and minor version of the autofs4 protocol version understood
 by loaded module. This call requires an initialized struct autofs_dev_ioctl
 with the ioctlfd field set to a valid autofs mount point descriptor
-and sets the requested version number in structure field arg1. These
-commands return 0 on success or one of the negative error codes if
-validation fails.
+and sets the requested version number in the version field of struct args_protover
+or the sub_version field of struct args_protosubver. These commands return
+0 on success or one of the negative error codes if validation fails.
 
 
 AUTOFS_DEV_IOCTL_OPENMOUNT and AUTOFS_DEV_IOCTL_CLOSEMOUNT
@@ -256,9 +267,9 @@ AUTOFS_DEV_IOCTL_OPENMOUNT and AUTOFS_DEV_IOCTL_CLOSEMOUNT
 Obtain and release a file descriptor for an autofs managed mount point
 path. The open call requires an initialized struct autofs_dev_ioctl with
 the path field set and the size field adjusted appropriately as well
-as the arg1 field set to the device number of the autofs mount. The
-device number can be obtained from the mount options shown in
-/proc/mounts. The close call requires an initialized struct
+as the devid field of struct args_openmount set to the device number of
+the autofs mount. The device number can be obtained from the mount options
+shown in /proc/mounts. The close call requires an initialized struct
 autofs_dev_ioct with the ioctlfd field set to the descriptor obtained
 from the open call. The release of the file descriptor can also be done
 with close(2) so any open descriptors will also be closed at process exit.
@@ -272,10 +283,10 @@ AUTOFS_DEV_IOCTL_READY_CMD and AUTOFS_DEV_IOCTL_FAIL_CMD
 Return mount and expire result status from user space to the kernel.
 Both of these calls require an initialized struct autofs_dev_ioctl
 with the ioctlfd field set to the descriptor obtained from the open
-call and the arg1 field set to the wait queue token number, received
-by user space in the foregoing mount or expire request. The arg2 field
-is set to the status to be returned. For the ready call this is always
-0 and for the fail call it is set to the errno of the operation.
+call and the token field of struct args_ready or struct args_fail set
+to the wait queue token number, received by user space in the foregoing
+mount or expire request. The status field of struct args_fail is set to
+the errno of the operation. It is set to 0 on success.
 
 
 AUTOFS_DEV_IOCTL_SETPIPEFD_CMD
@@ -290,9 +301,10 @@ mount be catatonic (see next call).
 
 The call requires an initialized struct autofs_dev_ioctl with the
 ioctlfd field set to the descriptor obtained from the open call and
-the arg1 field set to descriptor of the pipe. On success the call
-also sets the process group id used to identify the controlling process
-(eg. the owning automount(8) daemon) to the process group of the caller.
+the pipefd field of struct args_setpipefd set to the descriptor of the pipe.
+On success the call also sets the process group id used to identify the
+controlling process (eg. the owning automount(8) daemon) to the process
+group of the caller.
 
 
 AUTOFS_DEV_IOCTL_CATATONIC_CMD
@@ -323,9 +335,8 @@ mount on the given path dentry.
 
 The call requires an initialized struct autofs_dev_ioctl with the path
 field set to the mount point in question and the size field adjusted
-appropriately as well as the arg1 field set to the device number of the
-containing autofs mount. Upon return the struct field arg1 contains the
-uid and arg2 the gid.
+appropriately. Upon return the uid field of struct args_requester contains
+the uid and the gid field contains the gid.
 
 When reconstructing an autofs mount tree with active mounts we need to
 re-connect to mounts that may have used the original process uid and
@@ -343,8 +354,9 @@ this ioctl is called until no further expire candidates are found.
 The call requires an initialized struct autofs_dev_ioctl with the
 ioctlfd field set to the descriptor obtained from the open call. In
 addition an immediate expire, independent of the mount timeout, can be
-requested by setting the arg1 field to 1. If no expire candidates can
-be found the ioctl returns -1 with errno set to EAGAIN.
+requested by setting the how field of struct args_expire to 1. If no
+expire candidates can be found the ioctl returns -1 with errno set to
+EAGAIN.
 
 This call causes the kernel module to check the mount corresponding
 to the given ioctlfd for mounts that can be expired, issues an expire
@@ -357,7 +369,8 @@ Checks if an autofs mount point is in use.
 
 The call requires an initialized struct autofs_dev_ioctl with the
 ioctlfd field set to the descriptor obtained from the open call and
-it returns the result in the arg1 field, 1 for busy and 0 otherwise.
+it returns the result in the may_umount field of struct args_askumount,
+1 for busy and 0 otherwise.
 
 
 AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD
@@ -369,12 +382,12 @@ The call requires an initialized struct autofs_dev_ioctl. There are two
 possible variations. Both use the path field set to the path of the mount
 point to check and the size field adjusted appropriately. One uses the
 ioctlfd field to identify a specific mount point to check while the other
-variation uses the path and optionally arg1 set to an autofs mount type.
-The call returns 1 if this is a mount point and sets arg1 to the device
-number of the mount and field arg2 to the relevant super block magic
-number (described below) or 0 if it isn't a mountpoint. In both cases
-the the device number (as returned by new_encode_dev()) is returned
-in field arg1.
+variation uses the path and optionally the in.type field of struct args_ismountpoint
+set to an autofs mount type. The call returns 1 if this is a mount point
+and sets the out.devid field to the device number of the mount and the
+out.magic field to the relevant super block magic number (described below),
+or 0 if it isn't a mountpoint. In both cases the device number (as returned
+by new_encode_dev()) is returned in the out.devid field.
 
 If supplied with a file descriptor we're looking for a specific mount,
 not necessarily at the top of the mounted stack. In this case the path
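To make the union change above concrete, here is a hedged userspace sketch of
acknowledging a mount request through the new named members instead of
arg1/arg2 (error handling elided; autofs_send_ready is a hypothetical helper,
and devctl_fd is an open descriptor on the /dev/autofs control device):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/auto_dev-ioctl.h>

    static int autofs_send_ready(int devctl_fd, int ioctlfd, unsigned int token)
    {
            struct autofs_dev_ioctl param;

            memset(&param, 0, sizeof(param));
            param.ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
            param.ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
            param.size = sizeof(param);
            param.ioctlfd = ioctlfd;        /* from AUTOFS_DEV_IOCTL_OPENMOUNT */
            param.ready.token = token;      /* was: param.arg1 = token */

            return ioctl(devctl_fd, AUTOFS_DEV_IOCTL_READY, &param);
    }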
diff --git a/Documentation/filesystems/autofs4.txt b/Documentation/filesystems/autofs4.txt
index 39d02e19fb6288f79769f356d7759a997bf9461f..8fac3fe7b8c971c0c39e54283c2fa0698922aaff 100644
@@ -203,9 +203,9 @@ initiated or is being considered, otherwise it returns 0.
 Mountpoint expiry
 -----------------
 
-The VFS has a mechansim for automatically expiring unused mounts,
+The VFS has a mechanism for automatically expiring unused mounts,
 much as it can expire any unused dentry information from the dcache.
-This is guided by the MNT_SHRINKABLE flag.  This  only applies to
+This is guided by the MNT_SHRINKABLE flag.  This only applies to
 mounts that were created by `d_automount()` returning a filesystem to be
 mounted.  As autofs doesn't return such a filesystem but leaves the
 mounting to the automount daemon, it must involve the automount daemon
@@ -298,7 +298,7 @@ remove directories and symlinks using normal filesystem operations.
 autofs knows whether a process requesting some operation is the daemon
 or not based on its process-group id number (see getpgid(1)).
 
-When an autofs filesystem it mounted the pgid of the mounting
+When an autofs filesystem is mounted the pgid of the mounting
 processes is recorded unless the "pgrp=" option is given, in which
 case that number is recorded instead.  Any request arriving from a
 process in that process group is considered to come from the daemon.
@@ -450,7 +450,7 @@ Commands are:
     numbers for existing filesystems can be found in
     `/proc/self/mountinfo`.
 - **AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD**: same as `close(ioctlfd)`.
-- **AUTOFS_DEV_IOCTL_SETPIPEFD_CMD**: if the  filesystem is in
+- **AUTOFS_DEV_IOCTL_SETPIPEFD_CMD**: if the filesystem is in
     catatonic mode, this can provide the write end of a new pipe
     in `arg1` to re-establish communication with a daemon.  The
     process group of the calling process is used to identify the
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 705fb915cbf7fd05b6b6355f71a75ae13b14f597..a1489e14f8eec3eb14081274ac545e21c09d77d5 100644
@@ -33,6 +33,37 @@ can also be entered as
 Double-quotes can be used to protect spaces in values, e.g.:
        param="spaces in here"
 
+cpu lists:
+----------
+
+Some kernel parameters take a list of CPUs as a value, e.g.  isolcpus,
+nohz_full, irqaffinity, rcu_nocbs.  The format of this list is:
+
+       <cpu number>,...,<cpu number>
+
+or
+
+       <cpu number>-<cpu number>
+       (must be a positive range in ascending order)
+
+or a mixture
+
+       <cpu number>,...,<cpu number>-<cpu number>
+
+Note that for the special case of a range one can split the range into equal
+sized groups and for each group use some amount from the beginning of that
+group:
+
+       <cpu number>-<cpu number>:<used size>/<group size>
+
+For example one can add the following parameter to the command line:
+
+       isolcpus=1,2,10-20,100-2000:2/25
+
+where the final item represents CPUs 100,101,125,126,150,151,...
+
+
+
 This document may not be entirely up to date and comprehensive. The command
 "modinfo -p ${modulename}" shows a current list of all parameters of a loadable
 module. Loadable modules, after being loaded into the running kernel, also
@@ -1789,13 +1820,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        See Documentation/filesystems/nfs/nfsroot.txt.
 
        irqaffinity=    [SMP] Set the default irq affinity mask
-                       Format:
-                       <cpu number>,...,<cpu number>
-                       or
-                       <cpu number>-<cpu number>
-                       (must be a positive range in ascending order)
-                       or a mixture
-                       <cpu number>,...,<cpu number>-<cpu number>
+                       The argument is a cpu list, as described above.
 
        irqfixup        [HW]
                        When an interrupt is not handled search all handlers
@@ -1812,13 +1837,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Format: <RDP>,<reset>,<pci_scan>,<verbosity>
 
        isolcpus=       [KNL,SMP] Isolate CPUs from the general scheduler.
-                       Format:
-                       <cpu number>,...,<cpu number>
-                       or
-                       <cpu number>-<cpu number>
-                       (must be a positive range in ascending order)
-                       or a mixture
-                       <cpu number>,...,<cpu number>-<cpu number>
+                       The argument is a cpu list, as described above.
 
                        This option can be used to specify one or more CPUs
                        to isolate from the general SMP balancing and scheduling
@@ -2680,6 +2699,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Default: on
 
        nohz_full=      [KNL,BOOT]
+                       The argument is a cpu list, as described above.
                        In kernels built with CONFIG_NO_HZ_FULL=y, set
                        the specified list of CPUs whose tick will be stopped
                        whenever possible. The boot CPU will be forced outside
@@ -3285,6 +3305,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        See Documentation/blockdev/ramdisk.txt.
 
        rcu_nocbs=      [KNL]
+                       The argument is a cpu list, as described above.
+
                        In kernels built with CONFIG_RCU_NOCB_CPU=y, set
                        the specified list of CPUs to be no-callback CPUs.
                        Invocation of these CPUs' RCU callbacks will
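The cpu-list grammar documented above (including the <used size>/<group size>
step syntax that this series adds to bitmap_parselist() in lib/bitmap.c) is
the same one the kernel accepts via cpulist_parse(). A minimal in-kernel
sketch, with apply_isolcpus as a hypothetical caller:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    static int apply_isolcpus(const char *str)
    {
            cpumask_var_t mask;
            int err;

            if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            /* "1,2,10-20,100-2000:2/25" -> CPUs 1,2,10..20,100,101,125,126,... */
            err = cpulist_parse(str, mask);

            free_cpumask_var(mask);
            return err;
    }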
diff --git a/arch/arm/include/asm/trusted_foundations.h b/arch/arm/include/asm/trusted_foundations.h
index 624e1d436c6ce8d387bef77963c1315bc702a938..00748350cf72c263c862f00987ed40131451b07a 100644
@@ -26,7 +26,6 @@
 #ifndef __ASM_ARM_TRUSTED_FOUNDATIONS_H
 #define __ASM_ARM_TRUSTED_FOUNDATIONS_H
 
-#include <linux/kconfig.h>
 #include <linux/printk.h>
 #include <linux/bug.h>
 #include <linux/of.h>
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 612eb530f33fcd19bc4539facb26fc30a2583979..91d2d5b014145d5fdc4071d027fb2358b9831347 100644
@@ -318,8 +318,7 @@ unsigned long get_wchan(struct task_struct *p)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-       unsigned long range_end = mm->brk + 0x02000000;
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+       return randomize_page(mm->brk, 0x02000000);
 }
 
 #ifdef CONFIG_MMU
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 55101bd86b9839b06fef8857510ccdc3095170f8..39feb85a6931093b064fa548b1778808186d0924 100644
@@ -7,7 +7,6 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/init.h>
-#include <linux/kconfig.h>
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/stringify.h>
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a4f5f766af08b010e66ec04889c098a5267eee05..27b2f1387df40b61b4aa059be5650d329964da6b 100644
@@ -372,12 +372,8 @@ unsigned long arch_align_stack(unsigned long sp)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-       unsigned long range_end = mm->brk;
-
        if (is_compat_task())
-               range_end += 0x02000000;
+               return randomize_page(mm->brk, 0x02000000);
        else
-               range_end += 0x40000000;
-
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+               return randomize_page(mm->brk, 0x40000000);
 }
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index cb16fcc5f8f05228c3cf94a1593800749f6534fa..5537f95b28c9be169ffd42a42094eaeae68ce064 100644
@@ -267,6 +267,17 @@ static void octeon_crash_shutdown(struct pt_regs *regs)
        default_machine_crash_shutdown(regs);
 }
 
+#ifdef CONFIG_SMP
+void octeon_crash_smp_send_stop(void)
+{
+       int cpu;
+
+       /* disable watchdogs */
+       for_each_online_cpu(cpu)
+               cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+}
+#endif
+
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_CAVIUM_RESERVE32
@@ -911,6 +922,9 @@ void __init prom_init(void)
        _machine_kexec_shutdown = octeon_shutdown;
        _machine_crash_shutdown = octeon_crash_shutdown;
        _machine_kexec_prepare = octeon_kexec_prepare;
+#ifdef CONFIG_SMP
+       _crash_smp_send_stop = octeon_crash_smp_send_stop;
+#endif
 #endif
 
        octeon_user_io_init();
diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h
index ee25ebbf2a28809c73f68311c24284c53fe87907..493a3cc7c39ad5a412d6d460061b740260dfff9c 100644
@@ -45,6 +45,7 @@ extern const unsigned char kexec_smp_wait[];
 extern unsigned long secondary_kexec_args[4];
 extern void (*relocated_kexec_smp_wait) (void *);
 extern atomic_t kexec_ready_to_reboot;
+extern void (*_crash_smp_send_stop)(void);
 #endif
 #endif
 
diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h
index d1ff774ac4b6560c4048311c1aca1821291ca437..c68c0cc879c6b22756c0f20ea66d469ee56969b1 100644
@@ -14,7 +14,6 @@
 #include <linux/io.h>
 #include <linux/init.h>
 #include <linux/irq.h>
-#include <linux/kconfig.h>
 #include <boot_param.h>
 
 /* loongson internal northbridge initialization */
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 610f0f3bdb3455cfa42629e98f8cc883524fe8ba..1723b17622976da35170caba062fe956a0fcd245 100644
@@ -47,9 +47,14 @@ static void crash_shutdown_secondary(void *passed_regs)
 
 static void crash_kexec_prepare_cpus(void)
 {
+       static int cpus_stopped;
        unsigned int msecs;
+       unsigned int ncpus;
 
-       unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+       if (cpus_stopped)
+               return;
+
+       ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
        dump_send_ipi(crash_shutdown_secondary);
        smp_wmb();
@@ -64,6 +69,17 @@ static void crash_kexec_prepare_cpus(void)
                cpu_relax();
                mdelay(1);
        }
+
+       cpus_stopped = 1;
+}
+
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+       if (_crash_smp_send_stop)
+               _crash_smp_send_stop();
+
+       crash_kexec_prepare_cpus();
 }
 
 #else /* !defined(CONFIG_SMP)  */
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 50980bf3983ef3654d4f24c1dbf95e4576d0c3a8..59725204105c2b50aa476e1d0e5cb2e61d73cb7c 100644
@@ -25,6 +25,7 @@ void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
 #ifdef CONFIG_SMP
 void (*relocated_kexec_smp_wait) (void *);
 atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+void (*_crash_smp_send_stop)(void) = NULL;
 #endif
 
 int
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 36775d20b0e740b7b02ac09f446afaf960e3d5a7..f8b7bf836437708b414548625e1c4451cf747020 100644
@@ -35,7 +35,6 @@
  */
 #include <linux/sched.h>
 #include <linux/debugfs.h>
-#include <linux/kconfig.h>
 #include <linux/percpu-defs.h>
 #include <linux/perf_event.h>
 
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 39e7b472f0d8b103197d904550a3db150c861051..49a2e2226fee84f2a284373a427b5fe155bf261d 100644
@@ -14,7 +14,6 @@
 #include <linux/errno.h>
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
-#include <linux/kconfig.h>
 #include <linux/moduleloader.h>
 #include <linux/netdevice.h>
 #include <linux/string.h>
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 37d6e741be826da51474ab50fe3027cc06400cd5..5f202a566ec5f0296a22a71ac238a63a6a90ba7f 100644
@@ -479,7 +479,8 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 
                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
-                       if (printk_ratelimit())
+                       if (!(attrs & DMA_ATTR_NO_WARN) &&
+                           printk_ratelimit())
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %lx npages %lu\n", tbl, vaddr,
                                         npages);
@@ -776,7 +777,8 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
                                         mask >> tbl->it_page_shift, align,
                                         attrs);
                if (dma_handle == DMA_ERROR_CODE) {
-                       if (printk_ratelimit())  {
+                       if (!(attrs & DMA_ATTR_NO_WARN) &&
+                           printk_ratelimit())  {
                                dev_info(dev, "iommu_alloc failed, tbl %p "
                                         "vaddr %p npages %d\n", tbl, vaddr,
                                         npages);
diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c
index 851a94e6ae58061824917c82ee2ae0d8fe0f4b41..ef61c597898bc02e92cf352663b0d819bea2164c 100644
@@ -88,6 +88,5 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-       unsigned long range_end = mm->brk + 0x02000000;
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+       return randomize_page(mm->brk, 0x02000000);
 }
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
index 00299c927852ea72cc743e97bff6e2f3273c9bf8..d7c6b676b3a56a44cea03b73e401d04b0860eba1 100644
@@ -295,8 +295,7 @@ unsigned long get_wchan(struct task_struct *p)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-       unsigned long range_end = mm->brk + 0x02000000;
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+       return randomize_page(mm->brk, 0x02000000);
 }
 
 /*
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index d2434c1cad0558e2664d8f7587ed4701d049edd5..282630e4c6ea4696e54c6ea650751d913ce49c11 100644
@@ -210,6 +210,7 @@ struct kexec_entry64_regs {
 
 typedef void crash_vmclear_fn(void);
 extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+extern void kdump_nmi_shootdown_cpus(void);
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 19980b36f394b18e6816629390130fa3eb789115..026ea82ecc60492e1ee27eae439fb2b9c6402899 100644
@@ -47,6 +47,7 @@ struct smp_ops {
        void (*smp_cpus_done)(unsigned max_cpus);
 
        void (*stop_other_cpus)(int wait);
+       void (*crash_stop_other_cpus)(void);
        void (*smp_send_reschedule)(int cpu);
 
        int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 9616cf76940cd48b674fe326a24597767ebc9fd7..650830e39e3a7c8f8e01755f4ecf2bd5f83bb799 100644
@@ -133,15 +133,31 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
        disable_local_APIC();
 }
 
-static void kdump_nmi_shootdown_cpus(void)
+void kdump_nmi_shootdown_cpus(void)
 {
        nmi_shootdown_cpus(kdump_nmi_callback);
 
        disable_local_APIC();
 }
 
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+       static int cpus_stopped;
+
+       if (cpus_stopped)
+               return;
+
+       if (smp_ops.crash_stop_other_cpus)
+               smp_ops.crash_stop_other_cpus();
+       else
+               smp_send_stop();
+
+       cpus_stopped = 1;
+}
+
 #else
-static void kdump_nmi_shootdown_cpus(void)
+void crash_smp_send_stop(void)
 {
        /* There are no cpus to shootdown */
 }
@@ -160,7 +176,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
        /* The kernel is broken so disable interrupts */
        local_irq_disable();
 
-       kdump_nmi_shootdown_cpus();
+       crash_smp_send_stop();
 
        /*
         * VMCLEAR VMCSs loaded on this cpu if needed.
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 5a294e48b18529045ae4371f4ef8dc281773aea8..8c1f218926d783ecb8d288f9fc340379695089a7 100644
@@ -337,6 +337,9 @@ void arch_crash_save_vmcoreinfo(void)
 #endif
        vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
                              kaslr_offset());
+       VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
+       VMCOREINFO_VMALLOC_START(VMALLOC_START);
+       VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
 }
 
 /* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 28cea7802ecbc456ac3b7f8b8aeb44e0fee00ba0..0888a879120fcb948ec894d96ddee17c36997572 100644
@@ -509,8 +509,7 @@ unsigned long arch_align_stack(unsigned long sp)
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
-       unsigned long range_end = mm->brk + 0x02000000;
-       return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+       return randomize_page(mm->brk, 0x02000000);
 }
 
 /*
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 658777cf38512872b4e5d049a8618e4bbd1b869b..68f8cc222f255aa1cf5266e2d84d7ceeb2417977 100644
@@ -32,6 +32,8 @@
 #include <asm/nmi.h>
 #include <asm/mce.h>
 #include <asm/trace/irq_vectors.h>
+#include <asm/kexec.h>
+
 /*
  *     Some notes on x86 processor bugs affecting SMP operation:
  *
@@ -342,6 +344,9 @@ struct smp_ops smp_ops = {
        .smp_cpus_done          = native_smp_cpus_done,
 
        .stop_other_cpus        = native_stop_other_cpus,
+#if defined(CONFIG_KEXEC_CORE)
+       .crash_stop_other_cpus  = kdump_nmi_shootdown_cpus,
+#endif
        .smp_send_reschedule    = native_smp_send_reschedule,
 
        .cpu_up                 = native_cpu_up,
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 10e0272d789a189b7215100a1d66a676d9b4bbfa..a55ed63b9f91b0d45dbb476a22af9a19c4ab5fc8 100644
@@ -101,7 +101,6 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
                           unsigned long *end)
 {
        if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
-               unsigned long new_begin;
                /* This is usually used needed to map code in small
                   model, so it needs to be in the first 31bit. Limit
                   it to that.  This means we need to move the
@@ -112,9 +111,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
-                       new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
-                       if (new_begin)
-                               *begin = new_begin;
+                       *begin = randomize_page(*begin, 0x02000000);
                }
        } else {
                *begin = current->mm->mmap_legacy_base;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 5fb6c620180e19ebae73bee9cf6e83e3c65854e8..16a7134eedacce123c55c82c0e8f31b095bc14f0 100644
@@ -212,7 +212,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
         */
        smp_mb();
        if (atomic_dec_if_positive(&ps->pending) > 0)
-               queue_kthread_work(&pit->worker, &pit->expired);
+               kthread_queue_work(&pit->worker, &pit->expired);
 }
 
 void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -233,7 +233,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
 static void destroy_pit_timer(struct kvm_pit *pit)
 {
        hrtimer_cancel(&pit->pit_state.timer);
-       flush_kthread_work(&pit->expired);
+       kthread_flush_work(&pit->expired);
 }
 
 static void pit_do_work(struct kthread_work *work)
@@ -272,7 +272,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
        if (atomic_read(&ps->reinject))
                atomic_inc(&ps->pending);
 
-       queue_kthread_work(&pt->worker, &pt->expired);
+       kthread_queue_work(&pt->worker, &pt->expired);
 
        if (ps->is_periodic) {
                hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -324,7 +324,7 @@ static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
 
        /* TODO The new value only affected after the retriggered */
        hrtimer_cancel(&ps->timer);
-       flush_kthread_work(&pit->expired);
+       kthread_flush_work(&pit->expired);
        ps->period = interval;
        ps->is_periodic = is_period;
 
@@ -667,13 +667,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
        pid_nr = pid_vnr(pid);
        put_pid(pid);
 
-       init_kthread_worker(&pit->worker);
+       kthread_init_worker(&pit->worker);
        pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
                                       "kvm-pit/%d", pid_nr);
        if (IS_ERR(pit->worker_task))
                goto fail_kthread;
 
-       init_kthread_work(&pit->expired, pit_do_work);
+       kthread_init_work(&pit->expired, pit_do_work);
 
        pit->kvm = kvm;
 
@@ -730,7 +730,7 @@ void kvm_free_pit(struct kvm *kvm)
                kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
                kvm_pit_set_reinject(pit, false);
                hrtimer_cancel(&pit->pit_state.timer);
-               flush_kthread_work(&pit->expired);
+               kthread_flush_work(&pit->expired);
                kthread_stop(pit->worker_task);
                kvm_free_irq_source_id(kvm, pit->irq_source_id);
                kfree(pit);
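The i8254 hunks above are one instance of the tree-wide kthread worker rename
(init_kthread_worker() -> kthread_init_worker(), queue_kthread_work() ->
kthread_queue_work(), flush_kthread_work()/flush_kthread_worker() ->
kthread_flush_work()/kthread_flush_worker()). A minimal sketch of the renamed
API, also using the kthread_create_worker()/kthread_destroy_worker() helpers
this series introduces; all my_* names are illustrative:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/printk.h>

    static struct kthread_work my_work;

    static void my_work_fn(struct kthread_work *work)
    {
            pr_info("my_work ran\n");
    }

    static int my_start(void)
    {
            struct kthread_worker *worker;

            /* new helper; replaces open-coding kthread_run(kthread_worker_fn, ...) */
            worker = kthread_create_worker(0, "my-worker");
            if (IS_ERR(worker))
                    return PTR_ERR(worker);

            kthread_init_work(&my_work, my_work_fn);
            kthread_queue_work(worker, &my_work);
            kthread_flush_work(&my_work);   /* wait until it has run */

            kthread_destroy_worker(worker); /* drains the worker and stops it */
            return 0;
    }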
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 083e56f72308e7ebed8721ba300eb59c22f953cf..46fe9248410d74c0c457695a81b3febdc31c0a41 100644
@@ -31,6 +31,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        unsigned int granularity;
        enum req_op op;
        int alignment;
+       sector_t bs_mask;
 
        if (!q)
                return -ENXIO;
@@ -50,6 +51,10 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                op = REQ_OP_DISCARD;
        }
 
+       bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+       if ((sector | nr_sects) & bs_mask)
+               return -EINVAL;
+
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
@@ -150,10 +155,15 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
        unsigned int max_write_same_sectors;
        struct bio *bio = NULL;
        int ret = 0;
+       sector_t bs_mask;
 
        if (!q)
                return -ENXIO;
 
+       bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+       if ((sector | nr_sects) & bs_mask)
+               return -EINVAL;
+
        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = UINT_MAX >> 9;
 
@@ -202,6 +212,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        int ret;
        struct bio *bio = NULL;
        unsigned int sz;
+       sector_t bs_mask;
+
+       bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+       if ((sector | nr_sects) & bs_mask)
+               return -EINVAL;
 
        while (nr_sects != 0) {
                bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
diff --git a/block/ioctl.c b/block/ioctl.c
index ed2397f8de9dc3c96f00f13c82c0e0e041fab06e..755119c3c1b9129890eedc6abbe670ac4b88ecdf 100644
@@ -225,7 +225,8 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
                unsigned long arg)
 {
        uint64_t range[2];
-       uint64_t start, len;
+       struct address_space *mapping;
+       uint64_t start, end, len;
 
        if (!(mode & FMODE_WRITE))
                return -EBADF;
@@ -235,18 +236,23 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
 
        start = range[0];
        len = range[1];
+       end = start + len - 1;
 
        if (start & 511)
                return -EINVAL;
        if (len & 511)
                return -EINVAL;
-       start >>= 9;
-       len >>= 9;
-
-       if (start + len > (i_size_read(bdev->bd_inode) >> 9))
+       if (end >= (uint64_t)i_size_read(bdev->bd_inode))
+               return -EINVAL;
+       if (end < start)
                return -EINVAL;
 
-       return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL, false);
+       /* Invalidate the page cache, including dirty pages */
+       mapping = bdev->bd_inode->i_mapping;
+       truncate_inode_pages_range(mapping, start, end);
+
+       return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
+                                   false);
 }
 
 static int put_ushort(unsigned long arg, unsigned short val)
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index bfb92ace2c91a9a3786ab3c2cedb00224d2b62f0..6989ba0046df275cb6b8d14470b8618caf40beb4 100644
@@ -47,7 +47,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
        /* If another context is idling then defer */
        if (engine->idling) {
-               queue_kthread_work(&engine->kworker, &engine->pump_requests);
+               kthread_queue_work(&engine->kworker, &engine->pump_requests);
                goto out;
        }
 
@@ -58,7 +58,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 
                /* Only do teardown in the thread */
                if (!in_kthread) {
-                       queue_kthread_work(&engine->kworker,
+                       kthread_queue_work(&engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }
@@ -189,7 +189,7 @@ int crypto_transfer_cipher_request(struct crypto_engine *engine,
        ret = ablkcipher_enqueue_request(&engine->queue, req);
 
        if (!engine->busy && need_pump)
-               queue_kthread_work(&engine->kworker, &engine->pump_requests);
+               kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
@@ -231,7 +231,7 @@ int crypto_transfer_hash_request(struct crypto_engine *engine,
        ret = ahash_enqueue_request(&engine->queue, req);
 
        if (!engine->busy && need_pump)
-               queue_kthread_work(&engine->kworker, &engine->pump_requests);
+               kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
@@ -284,7 +284,7 @@ void crypto_finalize_cipher_request(struct crypto_engine *engine,
 
        req->base.complete(&req->base, err);
 
-       queue_kthread_work(&engine->kworker, &engine->pump_requests);
+       kthread_queue_work(&engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_cipher_request);
 
@@ -321,7 +321,7 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
 
        req->base.complete(&req->base, err);
 
-       queue_kthread_work(&engine->kworker, &engine->pump_requests);
+       kthread_queue_work(&engine->kworker, &engine->pump_requests);
 }
 EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
 
@@ -345,7 +345,7 @@ int crypto_engine_start(struct crypto_engine *engine)
        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-       queue_kthread_work(&engine->kworker, &engine->pump_requests);
+       kthread_queue_work(&engine->kworker, &engine->pump_requests);
 
        return 0;
 }
@@ -422,7 +422,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);
 
-       init_kthread_worker(&engine->kworker);
+       kthread_init_worker(&engine->kworker);
        engine->kworker_task = kthread_run(kthread_worker_fn,
                                           &engine->kworker, "%s",
                                           engine->name);
@@ -430,7 +430,7 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
-       init_kthread_work(&engine->pump_requests, crypto_pump_work);
+       kthread_init_work(&engine->pump_requests, crypto_pump_work);
 
        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
@@ -455,7 +455,7 @@ int crypto_engine_exit(struct crypto_engine *engine)
        if (ret)
                return ret;
 
-       flush_kthread_worker(&engine->kworker);
+       kthread_flush_worker(&engine->kworker);
        kthread_stop(engine->kworker_task);
 
        return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cbdb3b162718878a84e9eb69aebb8b0a05616e7a..fa1b7a90ba11deecda3ba5853b545fc2fc0dc579 100644
@@ -840,13 +840,13 @@ static void loop_config_discard(struct loop_device *lo)
 
 static void loop_unprepare_queue(struct loop_device *lo)
 {
-       flush_kthread_worker(&lo->worker);
+       kthread_flush_worker(&lo->worker);
        kthread_stop(lo->worker_task);
 }
 
 static int loop_prepare_queue(struct loop_device *lo)
 {
-       init_kthread_worker(&lo->worker);
+       kthread_init_worker(&lo->worker);
        lo->worker_task = kthread_run(kthread_worker_fn,
                        &lo->worker, "loop%d", lo->lo_number);
        if (IS_ERR(lo->worker_task))
@@ -1658,7 +1658,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                break;
        }
 
-       queue_kthread_work(&lo->worker, &cmd->work);
+       kthread_queue_work(&lo->worker, &cmd->work);
 
        return BLK_MQ_RQ_QUEUE_OK;
 }
@@ -1696,7 +1696,7 @@ static int loop_init_request(void *data, struct request *rq,
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
        cmd->rq = rq;
-       init_kthread_work(&cmd->work, loop_queue_work);
+       kthread_init_work(&cmd->work, loop_queue_work);
 
        return 0;
 }
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 3efb3bf0ab839578156e33907eb20c741c08155f..d131e152c8ce6a3e70a7e0fe287275269f0c40c6 100644
@@ -2100,23 +2100,37 @@ unsigned long get_random_long(void)
 }
 EXPORT_SYMBOL(get_random_long);
 
-/*
- * randomize_range() returns a start address such that
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start:     The smallest acceptable address the caller will take.
+ * @range:     The size of the area, starting at @start, within which the
+ *             random address must fall.
+ *
+ * If @start + @range would overflow, @range is capped.
  *
- *    [...... <range> .....]
- *  start                  end
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned.  We now align it regardless.
  *
- * a <range> with size "len" starting at the return value is inside in the
- * area defined by [start, end], but is otherwise randomized.
+ * Return: A page aligned address within [start, start + range).  On error,
+ * @start is returned.
  */
 unsigned long
-randomize_range(unsigned long start, unsigned long end, unsigned long len)
+randomize_page(unsigned long start, unsigned long range)
 {
-       unsigned long range = end - len - start;
+       if (!PAGE_ALIGNED(start)) {
+               range -= PAGE_ALIGN(start) - start;
+               start = PAGE_ALIGN(start);
+       }
 
-       if (end <= start + len)
-               return 0;
-       return PAGE_ALIGN(get_random_int() % range + start);
+       if (start > ULONG_MAX - range)
+               range = ULONG_MAX - start;
+
+       range >>= PAGE_SHIFT;
+
+       if (range == 0)
+               return start;
+
+       return start + (get_random_long() % range << PAGE_SHIFT);
 }
 
 /* Interface for in-kernel drivers of true hardware RNGs.
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8114744bf30c95745d29bdaa4c5513413f5d734f..d433b1db1fdd79469ae7b744cf8f2514c3a0fab0 100644
@@ -38,7 +38,6 @@
 #include <linux/workqueue.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
-#include <linux/kconfig.h>
 #include "../tty/hvc/hvc_console.h"
 
 #define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index f2f229efbe64d7d8ddcdf4f985ad23be76ec6977..6d9904a4a0abe7efb9e882942cd76f96dc920806 100644
@@ -129,7 +129,7 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
                if (likely(worker)) {
                        cq->notify = RVT_CQ_NONE;
                        cq->triggered++;
-                       queue_kthread_work(worker, &cq->comptask);
+                       kthread_queue_work(worker, &cq->comptask);
                }
        }
 
@@ -265,7 +265,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
        cq->ibcq.cqe = entries;
        cq->notify = RVT_CQ_NONE;
        spin_lock_init(&cq->lock);
-       init_kthread_work(&cq->comptask, send_complete);
+       kthread_init_work(&cq->comptask, send_complete);
        cq->queue = wc;
 
        ret = &cq->ibcq;
@@ -295,7 +295,7 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
        struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
        struct rvt_dev_info *rdi = cq->rdi;
 
-       flush_kthread_work(&cq->comptask);
+       kthread_flush_work(&cq->comptask);
        spin_lock(&rdi->n_cqs_lock);
        rdi->n_cqs_allocated--;
        spin_unlock(&rdi->n_cqs_lock);
@@ -514,7 +514,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
        rdi->worker = kzalloc(sizeof(*rdi->worker), GFP_KERNEL);
        if (!rdi->worker)
                return -ENOMEM;
-       init_kthread_worker(rdi->worker);
+       kthread_init_worker(rdi->worker);
        task = kthread_create_on_node(
                kthread_worker_fn,
                rdi->worker,
@@ -547,7 +547,7 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
        /* blocks future queuing from send_complete() */
        rdi->worker = NULL;
        smp_wmb(); /* See rdi_cq_enter */
-       flush_kthread_worker(worker);
+       kthread_flush_worker(worker);
        kthread_stop(worker->task);
        kfree(worker);
 }
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index 09c769c6e91fa1d62a5a8c328bdbfdbf9efedc7a..ef8c747c35e76f0c77eaf86300f724e30cf6e0ff 100644
@@ -9,7 +9,6 @@
 
 #include <linux/kernel.h>
 #include <linux/device.h>
-#include <linux/kconfig.h>
 #include <linux/list.h>
 #include <linux/pm.h>
 #include <linux/rmi.h>
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index c83bce89028b2a2d270ffa3aed232788b8b491b6..4a88312fbd25405051f9ef422b4a71d95059c7a2 100644
@@ -17,7 +17,6 @@
 #include <linux/bitmap.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
-#include <linux/kconfig.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/of.h>
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index fac81fc9bcf6e51acb57f52a883d97d57dbd90c9..b5d2dfc23bad9fc7d5a626762518817713adbefa 100644
@@ -8,7 +8,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kconfig.h>
 #include <linux/rmi.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index 20c7134b3d3b9d12d68694766edea77783be8c44..f798f427a46fde82f9580fa964c41452963c3260 100644
@@ -12,7 +12,6 @@
 #include <linux/device.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
-#include <linux/kconfig.h>
 #include <linux/rmi.h>
 #include <linux/slab.h>
 #include <linux/of.h>
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index b844c89a95067e2a109e5b4520f8fc9bf9249572..daa4ae89e466edea622c36bbf02ec6b987444be1 100644
@@ -52,7 +52,6 @@
 
 #include <linux/bitops.h>
 #include <linux/cpumask.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 0fea985ef1dc2da87d0cc7b0fe83ccf57a85aed2..353c54986211805dd865e93c4540eb0a452def44 100644
@@ -12,7 +12,6 @@
 #define pr_fmt(fmt)    KBUILD_MODNAME  ": " fmt
 
 #include <linux/bitops.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 0ec92631e23ca0a4c3720d0d6cf32a8fd4bb837e..64c2692070ef2a0c413e4d5760e2ee1d0f795630 100644
@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 1d4a5b46d9aeee05e8c5851748c154177e3c198e..bddf169c4b37b7c9d2a91518aa233480dbb52f5d 100644
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/kconfig.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
 #include <linux/of.h>
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 5eacce1ef88b6e4435f236c27933ae528cac8004..dc75bea0d541b3a2892d688c7eaf3093e1168790 100644
@@ -581,7 +581,7 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
        if (!md->init_tio_pdu)
                memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
-               init_kthread_work(&tio->work, map_tio_request);
+               kthread_init_work(&tio->work, map_tio_request);
 }
 
 static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
@@ -831,7 +831,7 @@ static void dm_old_request_fn(struct request_queue *q)
                tio = tio_from_request(rq);
                /* Establish tio->ti before queuing work (map_tio_request) */
                tio->ti = ti;
-               queue_kthread_work(&md->kworker, &tio->work);
+               kthread_queue_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
 }
@@ -853,7 +853,7 @@ int dm_old_init_request_queue(struct mapped_device *md)
        blk_queue_prep_rq(md->queue, dm_old_prep_fn);
 
        /* Initialize the request-based DM worker thread */
-       init_kthread_worker(&md->kworker);
+       kthread_init_worker(&md->kworker);
        md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
                                       "kdmwork-%s", dm_device_name(md));
        if (IS_ERR(md->kworker_task))
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index be35258324c114b1d2a5042e9404bc6a108e1d4d..147af9536d0c10d4054f42306383e6fe118a6ce4 100644
@@ -1891,7 +1891,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
        spin_unlock_irq(q->queue_lock);
 
        if (dm_request_based(md) && md->kworker_task)
-               flush_kthread_worker(&md->kworker);
+               kthread_flush_worker(&md->kworker);
 
        /*
         * Take suspend_lock so that presuspend and postsuspend methods
@@ -2147,7 +2147,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
        if (dm_request_based(md)) {
                dm_stop_queue(md->queue);
                if (md->kworker_task)
-                       flush_kthread_worker(&md->kworker);
+                       kthread_flush_worker(&md->kworker);
        }
 
        flush_workqueue(md->wq);
diff --git a/drivers/media/dvb-frontends/af9013.h b/drivers/media/dvb-frontends/af9013.h
index 1dcc936e1661829789e17a2fea4b65a9e6aa0d7b..dcdd163ace8576e71d98e575959b3332227e6ba8 100644
@@ -25,7 +25,6 @@
 #ifndef AF9013_H
 #define AF9013_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 /* AF9013/5 GPIOs (mostly guessed)
diff --git a/drivers/media/dvb-frontends/af9033.h b/drivers/media/dvb-frontends/af9033.h
index 6ad22b69a63605e6b14c5516614fc40640e9085e..5b83e4f96297126ae5b3da337b6dc250645e91e8 100644
@@ -22,8 +22,6 @@
 #ifndef AF9033_H
 #define AF9033_H
 
-#include <linux/kconfig.h>
-
 /*
  * I2C address (TODO: are these in 8-bit format?)
  * 0x38, 0x3a, 0x3c, 0x3e
diff --git a/drivers/media/dvb-frontends/ascot2e.h b/drivers/media/dvb-frontends/ascot2e.h
index 6da4ae6d6cc3acc04ae751c52ede36bbe2f9ead4..dc61bf7d1b0982e10b969976dbc5b014a8ee7ead 100644
@@ -22,7 +22,6 @@
 #ifndef __DVB_ASCOT2E_H__
 #define __DVB_ASCOT2E_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/i2c.h>
 
diff --git a/drivers/media/dvb-frontends/atbm8830.h b/drivers/media/dvb-frontends/atbm8830.h
index 5446d13fdfe88e6e06e3ae17afbd7208bd806d5f..bb862387080f7f0607493050c06372b5e0bc2cf1 100644
@@ -22,7 +22,6 @@
 #ifndef __ATBM8830_H__
 #define __ATBM8830_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/i2c.h>
 
diff --git a/drivers/media/dvb-frontends/au8522.h b/drivers/media/dvb-frontends/au8522.h
index 78bf3f73e58d4759c2d479b5e42285a18b33bfc8..21c51a4c519a2c0a195344d6f61b7320d498dc6a 100644
@@ -22,7 +22,6 @@
 #ifndef __AU8522_H__
 #define __AU8522_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 enum au8522_if_freq {
diff --git a/drivers/media/dvb-frontends/cx22702.h b/drivers/media/dvb-frontends/cx22702.h
index 68b69a7660d2a0bae09fa03b6588f1ef54ba1d81..a1956a9ba406e48741c633fdb6232e20e4a90d4a 100644
@@ -28,7 +28,6 @@
 #ifndef CX22702_H
 #define CX22702_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct cx22702_config {
diff --git a/drivers/media/dvb-frontends/cx24113.h b/drivers/media/dvb-frontends/cx24113.h
index 962919b9b6e62455829578629869d7d120db3179..194c703611b459c9c3da2c89bdb68aa116638c4d 100644
@@ -22,8 +22,6 @@
 #ifndef CX24113_H
 #define CX24113_H
 
-#include <linux/kconfig.h>
-
 struct dvb_frontend;
 
 struct cx24113_config {
diff --git a/drivers/media/dvb-frontends/cx24116.h b/drivers/media/dvb-frontends/cx24116.h
index f6dbabc1d62b7e56cc00696ded605ca1500cf2f7..9ff8df8d44b8e23f5272152af9f89402dde0857a 100644
@@ -21,7 +21,6 @@
 #ifndef CX24116_H
 #define CX24116_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct cx24116_config {
diff --git a/drivers/media/dvb-frontends/cx24117.h b/drivers/media/dvb-frontends/cx24117.h
index 1648ab43216879f3482f7ebaa7302819b9a1c6d3..445f13faf63a42d67ae3d4c32dd4e9558c3d1f6f 100644
@@ -22,7 +22,6 @@
 #ifndef CX24117_H
 #define CX24117_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct cx24117_config {
index f0970423e16f5f2f7ee55b1c4c1d58b326160534..de4ca9aa09233b1b542effc952c9f14db700534a 100644 (file)
@@ -20,7 +20,6 @@
 #ifndef CX24120_H
 #define CX24120_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/firmware.h>
 
index 975f3c926fe840e60d731f4851d41a57c585a574..aac23444aa9ab19cbb84f4ba2e01848bbe87b3c2 100644 (file)
@@ -21,7 +21,6 @@
 #ifndef CX24123_H
 #define CX24123_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct cx24123_config {
index d77afe0b8a9e1efd2cc243efe01b09d83a37f8ba..f3ff8f6eb3bb91ab7eab2695a7374acf1ab15641 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef CXD2820R_H
 #define CXD2820R_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 #define CXD2820R_GPIO_D (0 << 0) /* disable */
index 62ad5f07390ba6f1340fed44966bf4e0e05cfbf4..7f1acfb8f4f51a39c2a4c9c01b365ecaf2a6a946 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef CXD2841ER_H
 #define CXD2841ER_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 enum cxd2841er_xtal {
index b37e69e6a58c7ca63e05167780de8e9cadd86738..67a6d50865fbac7ec2b2069fb94c49f4e1475d07 100644 (file)
@@ -13,8 +13,6 @@
 #ifndef DIB3000MC_H
 #define DIB3000MC_H
 
-#include <linux/kconfig.h>
-
 #include "dibx000_common.h"
 
 struct dib3000mc_config {
index 6468c278cc4d371da8828bdf0924393bba0a4251..8f84dfa9bb58cd5200d4b736f105222ecf441d7d 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef DIB7000M_H
 #define DIB7000M_H
 
-#include <linux/kconfig.h>
-
 #include "dibx000_common.h"
 
 struct dib7000m_config {
index baa278928cf3405ffb390818666423dc9bdd50d9..205fbbff632bcbfe6d7e2851dc957fd901c18adb 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef DIB7000P_H
 #define DIB7000P_H
 
-#include <linux/kconfig.h>
-
 #include "dibx000_common.h"
 
 struct dib7000p_config {
index a47c22d6667eb88ba41bdaa68bbbafe642ef696f..f0507cdbb5034f57d3b942675dcb57b1857a2438 100644 (file)
@@ -24,7 +24,6 @@
 #ifndef _DRXD_H_
 #define _DRXD_H_
 
-#include <linux/kconfig.h>
 #include <linux/types.h>
 #include <linux/i2c.h>
 
index 8f0b9eec528f6ff0290dc462b8ed780b14991eaa..a629897eb90511f6230cd0d2775f3b7479836d4f 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef _DRXK_H_
 #define _DRXK_H_
 
-#include <linux/kconfig.h>
 #include <linux/types.h>
 #include <linux/i2c.h>
 
index 153169da90179dbadc0b71978834b38415b67543..82e8c2531f26f5590033208f947b1db0afb15b23 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef DS3000_H
 #define DS3000_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct ds3000_config {
index 15e4ceab869ab32d572ce96a387fd1e051b235e6..50f1af512b626dbc3a22bb26e66a0983ca38eb81 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef DVB_DUMMY_FE_H
 #define DVB_DUMMY_FE_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
index 9544bab5cd1d14d6c14100d2370131ce13f84652..e894bdcf35a3a6fa8a4d5f1423a8788dafd91c5d 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef EC100_H
 #define EC100_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct ec100_config {
index 48e9ab74c8831580f15446579c346763a66bddec..a14d6f36dbf62a35e9dfb1d1d94aabcb59cd049a 100644 (file)
@@ -23,7 +23,6 @@
 #ifndef HD29L2_H
 #define HD29L2_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct hd29l2_config {
index e1b9224cfc5580ed6c3d6e1bf242c37558c5fe8d..333615491d9e8ee8ce1725e82dead3888c0cdb0f 100644 (file)
@@ -21,7 +21,6 @@
 #ifndef __DVB_HELENE_H__
 #define __DVB_HELENE_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/i2c.h>
 
index c1e2d1834b782096ff3e35fefa3db8bb5c2bb786..672a556df71a184f1a2a21a0d302378500ac7bfe 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef __DVB_HORUS3A_H__
 #define __DVB_HORUS3A_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/i2c.h>
 
index af107a2dd3578aa61c9591e1778d3e516c028311..5eab39744b23558f4754a9a46bda197007cbc224 100644 (file)
@@ -20,7 +20,6 @@
 #ifndef DVB_IX2505V_H
 #define DVB_IX2505V_H
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
index d20bd909de394b0585c6b77a4996e210d73728f4..8c74ddc6b88a1a138f288d2509bef7cbf40445f2 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef _LG2160_H_
 #define _LG2160_H_
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
index f91a1b49ce2f93d06bf7cfd75cd8563b1bcb3884..e7dceb60e5727a89934afd2fc58cb6a69dbb7e8b 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef _LGDT3305_H_
 #define _LGDT3305_H_
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
index a5b3faf121f0f0d7e3a46b29c80f5a7def2caa96..f36a7fd0b1025515aa3bd37d41d4c2d1851f1c88 100644 (file)
@@ -23,7 +23,6 @@
 #ifndef LGS8GL5_H
 #define LGS8GL5_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct lgs8gl5_config {
index 368c9928ef7fd6cc23d54160683b7f795d52b939..7519c02103995ea818ab542bc98abc70ad586579 100644 (file)
@@ -26,7 +26,6 @@
 #ifndef __LGS8GXX_H__
 #define __LGS8GXX_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/i2c.h>
 
index a088b8ec1e535e0578397200cd504a92d1149c4a..24431dfdce1fa7273b63b1421d3dcadc5bb05fd1 100644 (file)
@@ -23,8 +23,6 @@
 #ifndef _LNBH24_H
 #define _LNBH24_H
 
-#include <linux/kconfig.h>
-
 /* system register bits */
 #define LNBH24_OLF     0x01
 #define LNBH24_OTF     0x02
index 1f329ef05accea6601a093e33078b5104ef2cfa2..f13fd0308b3e8dd23095964f74bce8d881265ebf 100644 (file)
@@ -22,7 +22,6 @@
 #define LNBH25_H
 
 #include <linux/i2c.h>
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 /* 22 kHz tone enabled. Tone output controlled by DSQIN pin */
index cd9101f6e5799366c30d99d458bfabd232ab64fc..4bb6439068ecca8b77263a21f79bcd94199c6da6 100644 (file)
@@ -27,8 +27,6 @@
 #ifndef _LNBP21_H
 #define _LNBP21_H
 
-#include <linux/kconfig.h>
-
 /* system register bits */
 /* [RO] 0=OK; 1=over current limit flag */
 #define LNBP21_OLF     0x01
index 5d01d92814c21b7feb30357f0ab68ec30d869cd4..0cb72126c498d9294e33d10d12ed7bd17ae776b7 100644 (file)
@@ -28,8 +28,6 @@
 #ifndef _LNBP22_H
 #define _LNBP22_H
 
-#include <linux/kconfig.h>
-
 /* Enable */
 #define LNBP22_EN        0x10
 /* Voltage selection */
index de7430178e9e142c2c75be0e9606087f836ab0f9..1a313b0f5875db5e2b20a2cc6fa0856e54a754ca 100644 (file)
@@ -20,7 +20,6 @@
 #ifndef M88RS2000_H
 #define M88RS2000_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
index a113282d69567805d58b7d33bedd94c65c81fa5c..dfb02db2126c1061b00225c698d3305c8d3c9220 100644 (file)
@@ -16,7 +16,6 @@
 #ifndef MB86A20S_H
 #define MB86A20S_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 /**
index f58b9ca5557ad7c7cf36a4e26ca33b1f8af81cfe..b38557c451b9dccbd889b51e55101d7ae9c07387 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef __S5H1409_H__
 #define __S5H1409_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct s5h1409_config {
index f3a87f7ec3601579386835c360322059670eac6f..791bab0e16e9b9506630c48d1735c5a95d9246d3 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef __S5H1411_H__
 #define __S5H1411_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 #define S5H1411_I2C_TOP_ADDR (0x32 >> 1)
index f490c5ee58015d0015a31ecf055f2053c079b41d..b81c9bd4e422388d3f8c3b94d31c31d37d88ef38 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef __S5H1432_H__
 #define __S5H1432_H__
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 #define S5H1432_I2C_TOP_ADDR (0x02 >> 1)
index f5b722d8081b86289268876efc7a5102585b839c..a47ed894d4aec9bcec67479a4767dcbf502f1208 100644 (file)
@@ -17,7 +17,6 @@
 #ifndef S921_H
 #define S921_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct s921_config {
index ef5f351ca68ea17c74b12a9616782e18ea172543..b1be62f1983a8a0766485d5f8275cc6be5db6c65 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef SI21XX_H
 #define SI21XX_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
index 6cceea022d49ec35e2bddb8f9954f3cdd6ea412e..3901cd74b3f7d8b5329bebec63373aa89e5b21f4 100644 (file)
@@ -17,7 +17,6 @@
 #ifndef SP2_H
 #define SP2_H
 
-#include <linux/kconfig.h>
 #include "dvb_ca_en50221.h"
 
 /*
index da581b652cb93866b82dd881709d3e36f7388c28..78e75dfc317f271e92ff786c34ffa9183bae6830 100644 (file)
@@ -23,7 +23,6 @@
 #ifndef __DVB_STB6000_H__
 #define __DVB_STB6000_H__
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
index b58603c00c80a24b6853588d951b38c355ee5222..803acb917282334bf288818ff681fd76e4892acd 100644 (file)
@@ -27,7 +27,6 @@
 #ifndef STV0288_H
 #define STV0288_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
index 92b3e85fb818c0e22523265fe92dc08486e0eded..b88166a9716faf40c1ffefd14ffc9dbc27209c5a 100644 (file)
@@ -26,7 +26,6 @@
 #ifndef STV0367_H
 #define STV0367_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
index c90bf00ea9ce14c0d32c55a29cb23061bb7df6c6..9ca2da90c7d7605f97d5c68537a43b742be1fe36 100644 (file)
@@ -26,7 +26,6 @@
 #ifndef STV0900_H
 #define STV0900_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
index f3c8a5c6b77d1af7f664ac51b4c96522783a1690..4604f793d954d7cdaf3fad52acd85fb0ce60db49 100644 (file)
@@ -25,7 +25,6 @@
 #ifndef __DVB_STV6110_H__
 #define __DVB_STV6110_H__
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
index bc77a7311de1610394c8c4c856873c6f3b7c66d9..a2cebb0ccebaeb724b01b70eb79a4c108652a48d 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef TDA10048_H
 #define TDA10048_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 #include <linux/firmware.h>
 
index 7ebd8eaff4eb5d2610382e6f37ec53b8ff61bee8..e6ccf240f54cc2a4a216256de64614a0b6ae11db 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _TDA18271C2DD_H_
 #define _TDA18271C2DD_H_
 
-#include <linux/kconfig.h>
-
 #if IS_REACHABLE(CONFIG_DVB_TDA18271C2DD)
 struct dvb_frontend *tda18271c2dd_attach(struct dvb_frontend *fe,
                                         struct i2c_adapter *i2c, u8 adr);
index 9220e5cf0d216942d08baea7ee447586d1c86a82..facc54f0a6af97b4f285143085d7b09aa07fc9be 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef TS2020_H
 #define TS2020_H
 
-#include <linux/kconfig.h>
 #include <linux/dvb/frontend.h>
 
 struct ts2020_config {
index 670e76a654ee4f8a7556075d3539e96236a50d11..c568d8d59de30c3f8f57a09e0282054740151dd0 100644 (file)
@@ -21,7 +21,6 @@
 #ifndef DVB_ZL10036_H
 #define DVB_ZL10036_H
 
-#include <linux/kconfig.h>
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
index 070929444e7111453403844d87d0a9b0321c762e..66e708569375781f1f7da684064563e348ce78ad 100644 (file)
@@ -22,8 +22,6 @@
 #ifndef ZL10039_H
 #define ZL10039_H
 
-#include <linux/kconfig.h>
-
 #if IS_REACHABLE(CONFIG_DVB_ZL10039)
 struct dvb_frontend *zl10039_attach(struct dvb_frontend *fe,
                                        u8 i2c_addr,
index 6c511723fd1ba5b291d28b67c33d20f7aaf82cee..57a40c84b46e82a6d37bb514ff3e6b76bca653af 100644 (file)
@@ -20,8 +20,6 @@
 #ifndef __ALTERA_CI_H
 #define __ALTERA_CI_H
 
-#include <linux/kconfig.h>
-
 #define ALT_DATA       0x000000ff
 #define ALT_TDI                0x00008000
 #define ALT_TDO                0x00004000
index 374033a5bdaf5384afd22f3415f7c5add20d25ee..ee48c3e09de41d5987612afe7efb2962804195d1 100644 (file)
@@ -750,7 +750,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
        spin_lock_init(&itv->lock);
        spin_lock_init(&itv->dma_reg_lock);
 
-       init_kthread_worker(&itv->irq_worker);
+       kthread_init_worker(&itv->irq_worker);
        itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
                                           "%s", itv->v4l2_dev.name);
        if (IS_ERR(itv->irq_worker_task)) {
@@ -760,7 +760,7 @@ static int ivtv_init_struct1(struct ivtv *itv)
        /* must use the FIFO scheduler as it is realtime sensitive */
        sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
 
-       init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
+       kthread_init_work(&itv->irq_work, ivtv_irq_work_handler);
 
        /* Initial settings */
        itv->cxhdl.port = CX2341X_PORT_MEMORY;
@@ -1441,7 +1441,7 @@ static void ivtv_remove(struct pci_dev *pdev)
        del_timer_sync(&itv->dma_timer);
 
        /* Kill irq worker */
-       flush_kthread_worker(&itv->irq_worker);
+       kthread_flush_worker(&itv->irq_worker);
        kthread_stop(itv->irq_worker_task);
 
        ivtv_streams_cleanup(itv);
index 36ca2d67c812189ffe9f1cc3b0a85c1251e4cbf3..6efe1f71262c76459a07457b8e58867cd73b5e5f 100644 (file)
@@ -1062,7 +1062,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
        }
 
        if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
-               queue_kthread_work(&itv->irq_worker, &itv->irq_work);
+               kthread_queue_work(&itv->irq_worker, &itv->irq_work);
        }
 
        spin_unlock(&itv->dma_reg_lock);
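
ivtv queues this work from its hard interrupt handler; that is safe because kthread_queue_work() takes the worker lock with spin_lock_irqsave() internally, and it returns false rather than double-queueing when the work is already pending. Reusing the invented my_dev sketch from the dm-rq section above, the top half reduces to (illustrative only):

    #include <linux/interrupt.h>
    #include <linux/kthread.h>

    static irqreturn_t my_irq(int irq, void *dev_id)
    {
            struct my_dev *dev = dev_id;

            /* ack the hardware, then defer the heavy lifting; a false
             * return just means the work was already queued */
            kthread_queue_work(&dev->worker, &dev->work);
            return IRQ_HANDLED;
    }
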
index 81bb568d694338f7f967fef7b3c799b39a6122d2..438cf897acd1fc14b9e23fe6239a59d58dec6009 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef LINUX_FC0011_H_
 #define LINUX_FC0011_H_
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 
 
index 9ad32859bab0679264577a813d497f91ce4781cd..4a23e418daf08495d38335a9fbd49e061e78645c 100644 (file)
@@ -21,7 +21,6 @@
 #ifndef _FC0012_H_
 #define _FC0012_H_
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 #include "fc001x-common.h"
 
index e130bd7a32302477d1f3a8bec41e9ff2b98aa8ad..8c34105c938342e29d97898d70943a6da945d075 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef _FC0013_H_
 #define _FC0013_H_
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 #include "fc001x-common.h"
 
index 5054f01a78fbba435e587af3a404ae3f136bd81f..aadd9fea59e4fffe062af7f50206be6936493c29 100644 (file)
@@ -22,8 +22,6 @@
 #ifndef __MAX2165_H__
 #define __MAX2165_H__
 
-#include <linux/kconfig.h>
-
 struct dvb_frontend;
 struct i2c_adapter;
 
index b3e614be657dbe32d8eef52d9278ca9e645f546c..6b40df33928458be61baf4ac9bb9a8a3fef6337f 100644 (file)
@@ -22,8 +22,6 @@
 #ifndef MC44S803_H
 #define MC44S803_H
 
-#include <linux/kconfig.h>
-
 struct dvb_frontend;
 struct i2c_adapter;
 
index 5764b12c5c7c6f45619763f90b0381889e8f00c7..d842734f2dcd865b169ed38e00428ae1f68dc11c 100644 (file)
@@ -23,8 +23,6 @@
 #ifndef __MXL5005S_H
 #define __MXL5005S_H
 
-#include <linux/kconfig.h>
-
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
index b1e5661af1c7e03d7a0f9f1d850598eaa29b6e65..fdcab91405de6690a6fbf0f73fa7af76cdbb2ee5 100644 (file)
@@ -21,7 +21,6 @@
 #ifndef R820T_H
 #define R820T_H
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 
 enum r820t_chip {
index 5f1a60bf7ced322299fe26701a8760e0e16dcb3a..76807f5b3cf804ff8f9aa6a1ee1f1c151c098db1 100644 (file)
@@ -17,7 +17,6 @@
 #ifndef SI2157_H
 #define SI2157_H
 
-#include <linux/kconfig.h>
 #include <media/media-device.h>
 #include "dvb_frontend.h"
 
index e58c9096d79c5e5a08a6bcdb5a644fa19490edbf..6391dafd0c9d02b48ba81df0e243f82ab61c4a75 100644 (file)
@@ -21,7 +21,6 @@
 #ifndef TDA18212_H
 #define TDA18212_H
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 
 struct tda18212_config {
index 1eacb4f84e937d5adbc819b28b794c9aa3b40540..076b5f2e888d4a4b4ef5e97ef27cc82c0cdc6ea7 100644 (file)
@@ -21,7 +21,6 @@
 #ifndef TDA18218_H
 #define TDA18218_H
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 
 struct tda18218_config {
index 00ba29e21fb96ffa2d0aae6874b99273e87b769b..336bd49eb09b2d4d59e64b61cfc703deec1227a0 100644 (file)
@@ -22,7 +22,6 @@
 #ifndef __XC5000_H__
 #define __XC5000_H__
 
-#include <linux/kconfig.h>
 #include <linux/firmware.h>
 
 struct dvb_frontend;
index 7065aca81252cc6275bafa2e36f6d9f013340166..e6eae9d88e9f2a36dbead11d6ee615a5ea5518bb 100644 (file)
@@ -21,7 +21,6 @@
 #ifndef __MXL111SF_DEMOD_H__
 #define __MXL111SF_DEMOD_H__
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 #include "mxl111sf.h"
 
index 509b5507121849fea1c0526dd1cbc4494ec49930..e96d9a444ed1aae155a439503bf99323d0d493b4 100644 (file)
@@ -21,7 +21,6 @@
 #ifndef __MXL111SF_TUNER_H__
 #define __MXL111SF_TUNER_H__
 
-#include <linux/kconfig.h>
 #include "dvb_frontend.h"
 #include "mxl111sf.h"
 
index 4b08c2a47ae21f2f8fd99f588726f557b71092ad..18ed3bfbb5e2a95f5127b41d0e1f15cce01b1f07 100644 (file)
@@ -9,7 +9,6 @@
  * see Documentation/dvb/README.dvb-usb for more information
  */
 
-#include <linux/kconfig.h>
 #include "dibusb.h"
 
 /* Max transfer size done by I2C transfer functions */
index 6d43d75493ea0ea35570c8716e1de51aaf5d881b..474c11e1d4951fb533fc49260f6faabfa1f303d2 100644 (file)
@@ -10,7 +10,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kconfig.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
index 4f270482cfd016aaaedd7e9924ac76c291a2f25d..d46e4adf6d2baa681ba782706615daa47d60b0cf 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/gfp.h>
 #include <linux/slab.h>
 #include <linux/reboot.h>
-#include <linux/kconfig.h>
 #include <linux/leds.h>
 
 #include <linux/mtd/mtd.h>
index c1f34f04e338c960aefa0d119f7c16f13eee7d6d..fccdd49bb96407d844e11d67a454ae015194bdea 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/err.h>
-#include <linux/kconfig.h>
 
 #include "mtdcore.h"
 
index cc9e6bd83e0e5151b3ad4736b3bfe3d5fc2c2cc9..76fb8552c9d93a6d3e2c424ee3ad0ef55be04adf 100644 (file)
@@ -17,7 +17,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kconfig.h>
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
index 42e34076d2de650399a134fb21e64c119470ee64..b14f0305aa318023a530856ad03a04b962cfd539 100644 (file)
@@ -821,7 +821,7 @@ static void encx24j600_set_multicast_list(struct net_device *dev)
        }
 
        if (oldfilter != priv->rxfilter)
-               queue_kthread_work(&priv->kworker, &priv->setrx_work);
+               kthread_queue_work(&priv->kworker, &priv->setrx_work);
 }
 
 static void encx24j600_hw_tx(struct encx24j600_priv *priv)
@@ -879,7 +879,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
        /* Remember the skb for deferred processing */
        priv->tx_skb = skb;
 
-       queue_kthread_work(&priv->kworker, &priv->tx_work);
+       kthread_queue_work(&priv->kworker, &priv->tx_work);
 
        return NETDEV_TX_OK;
 }
@@ -1037,9 +1037,9 @@ static int encx24j600_spi_probe(struct spi_device *spi)
                goto out_free;
        }
 
-       init_kthread_worker(&priv->kworker);
-       init_kthread_work(&priv->tx_work, encx24j600_tx_proc);
-       init_kthread_work(&priv->setrx_work, encx24j600_setrx_proc);
+       kthread_init_worker(&priv->kworker);
+       kthread_init_work(&priv->tx_work, encx24j600_tx_proc);
+       kthread_init_work(&priv->setrx_work, encx24j600_setrx_proc);
 
        priv->kworker_task = kthread_run(kthread_worker_fn, &priv->kworker,
                                         "encx24j600");
index e15bf84fc6b2ad5b0bc0a80ad703caa89ee2c63f..0ac449acaf5b734052ee3d4d2b98e90475148c9a 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/highmem.h>
 #include <linux/if_vlan.h>
 #include <linux/init.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
index 37ab46cdbec4b9059bd3eec7eb1e148eaa3cb3dd..d2349a1bc6baeec283c5df070cbf0d4b49f0b5e3 100644 (file)
@@ -9,7 +9,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kconfig.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
index 0b37ce9f28f1d4493bef915344d855f296e29953..ca31a57dbc862de2e99f351a3e110b7426e2d94e 100644 (file)
@@ -10,7 +10,6 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kconfig.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
index 68ef1875e8a88cdc6e7bb67a751c6421ddbf0d17..0fc99f0f257110a063f3e58722b2f0ec08da655e 100644 (file)
@@ -515,7 +515,8 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
                goto out;
 
        ret = BLK_MQ_RQ_QUEUE_BUSY;
-       if (!dma_map_sg(dev->dev, iod->sg, iod->nents, dma_dir))
+       if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
+                               DMA_ATTR_NO_WARN))
                goto out;
 
        if (!nvme_setup_prps(dev, req, size))
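
DMA_ATTR_NO_WARN, added in this series, suppresses the allocation-failure warning from the DMA-mapping core for callers that expect occasional failure and recover on their own; here nvme simply returns BLK_MQ_RQ_QUEUE_BUSY and the request is retried. The attrs argument is a plain unsigned long mask. A minimal sketch (function and names invented):

    #include <linux/dma-mapping.h>

    /* returns 0 on success, -EBUSY if the mapping should be retried later */
    static int my_map_sg(struct device *dev, struct scatterlist *sg,
                         int nents, enum dma_data_direction dir)
    {
            if (!dma_map_sg_attrs(dev, sg, nents, dir, DMA_ATTR_NO_WARN))
                    return -EBUSY;  /* failed quietly: no kernel splat */
            return 0;
    }
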
index 7512e98e9311055b431ce5eca775a9de508a31be..564a51abeecec3a313e94ffdc1a4e9e0900682bf 100644 (file)
@@ -31,7 +31,7 @@ config PPS_DEBUG
 
 config NTP_PPS
        bool "PPS kernel consumer support"
-       depends on !NO_HZ
+       depends on !NO_HZ_COMMON
        help
          This option adds support for direct in-kernel time
          synchronization using an external PPS signal.
index cebc296463ad17efe25fc6fa8b4bde593f1fdff3..bad0e0ea4f3059e51b6cfaffc78c859c1be3aecb 100644 (file)
@@ -1841,24 +1841,19 @@ static int cm_chan_msg_send(void __user *arg)
 {
        struct rio_cm_msg msg;
        void *buf;
-       int ret = 0;
+       int ret;
 
        if (copy_from_user(&msg, arg, sizeof(msg)))
                return -EFAULT;
        if (msg.size > RIO_MAX_MSG_SIZE)
                return -EINVAL;
 
-       buf = kmalloc(msg.size, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       if (copy_from_user(buf, (void __user *)(uintptr_t)msg.msg, msg.size)) {
-               ret = -EFAULT;
-               goto out;
-       }
+       buf = memdup_user((void __user *)(uintptr_t)msg.msg, msg.size);
+       if (IS_ERR(buf))
+               return PTR_ERR(buf);
 
        ret = riocm_ch_send(msg.ch_num, buf, msg.size);
-out:
+
        kfree(buf);
        return ret;
 }
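
memdup_user() folds the kmalloc()-plus-copy_from_user() pair into one call; note that the error convention changes from NULL to ERR_PTR, which is why ret loses its default value and the out: label disappears. Roughly, the helper does:

    /* approximately what memdup_user(src, len) expands to */
    void *buf = kmalloc(len, GFP_KERNEL);

    if (!buf)
            return ERR_PTR(-ENOMEM);
    if (copy_from_user(buf, src, len)) {
            kfree(buf);
            return ERR_PTR(-EFAULT);
    }
    return buf;
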
index 8146ccd35a1ac890aaa021beb955b4309be0fa56..5787b723b593f79bb5e55f3b68abcb2f19d4b5cb 100644 (file)
@@ -1112,7 +1112,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
        /* If another context is idling the device then defer */
        if (master->idling) {
-               queue_kthread_work(&master->kworker, &master->pump_messages);
+               kthread_queue_work(&master->kworker, &master->pump_messages);
                spin_unlock_irqrestore(&master->queue_lock, flags);
                return;
        }
@@ -1126,7 +1126,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 
                /* Only do teardown in the thread */
                if (!in_kthread) {
-                       queue_kthread_work(&master->kworker,
+                       kthread_queue_work(&master->kworker,
                                           &master->pump_messages);
                        spin_unlock_irqrestore(&master->queue_lock, flags);
                        return;
@@ -1250,7 +1250,7 @@ static int spi_init_queue(struct spi_master *master)
        master->running = false;
        master->busy = false;
 
-       init_kthread_worker(&master->kworker);
+       kthread_init_worker(&master->kworker);
        master->kworker_task = kthread_run(kthread_worker_fn,
                                           &master->kworker, "%s",
                                           dev_name(&master->dev));
@@ -1258,7 +1258,7 @@ static int spi_init_queue(struct spi_master *master)
                dev_err(&master->dev, "failed to create message pump task\n");
                return PTR_ERR(master->kworker_task);
        }
-       init_kthread_work(&master->pump_messages, spi_pump_messages);
+       kthread_init_work(&master->pump_messages, spi_pump_messages);
 
        /*
         * Master config will indicate if this controller should run the
@@ -1331,7 +1331,7 @@ void spi_finalize_current_message(struct spi_master *master)
        spin_lock_irqsave(&master->queue_lock, flags);
        master->cur_msg = NULL;
        master->cur_msg_prepared = false;
-       queue_kthread_work(&master->kworker, &master->pump_messages);
+       kthread_queue_work(&master->kworker, &master->pump_messages);
        spin_unlock_irqrestore(&master->queue_lock, flags);
 
        trace_spi_message_done(mesg);
@@ -1357,7 +1357,7 @@ static int spi_start_queue(struct spi_master *master)
        master->cur_msg = NULL;
        spin_unlock_irqrestore(&master->queue_lock, flags);
 
-       queue_kthread_work(&master->kworker, &master->pump_messages);
+       kthread_queue_work(&master->kworker, &master->pump_messages);
 
        return 0;
 }
@@ -1404,7 +1404,7 @@ static int spi_destroy_queue(struct spi_master *master)
        ret = spi_stop_queue(master);
 
        /*
-        * flush_kthread_worker will block until all work is done.
+        * kthread_flush_worker will block until all work is done.
         * If the reason that stop_queue timed out is that the work will never
         * finish, then it does no good to call flush/stop thread, so
         * return anyway.
@@ -1414,7 +1414,7 @@ static int spi_destroy_queue(struct spi_master *master)
                return ret;
        }
 
-       flush_kthread_worker(&master->kworker);
+       kthread_flush_worker(&master->kworker);
        kthread_stop(master->kworker_task);
 
        return 0;
@@ -1438,7 +1438,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
 
        list_add_tail(&msg->queue, &master->queue);
        if (!master->busy && need_pump)
-               queue_kthread_work(&master->kworker, &master->pump_messages);
+               kthread_queue_work(&master->kworker, &master->pump_messages);
 
        spin_unlock_irqrestore(&master->queue_lock, flags);
        return 0;
index 5d79efc1aafe5cd106a663c62893d4f8ff5c854a..046e84d7a15890e5b624763fe03f66a441eb7740 100644 (file)
@@ -241,10 +241,7 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
                obj->vob_discard_page_warned = 0;
        } else {
                SetPageError(vmpage);
-               if (ioret == -ENOSPC)
-                       set_bit(AS_ENOSPC, &inode->i_mapping->flags);
-               else
-                       set_bit(AS_EIO, &inode->i_mapping->flags);
+               mapping_set_error(inode->i_mapping, ioret);
 
                if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
                    obj->vob_discard_page_warned == 0) {
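
This and the fs/ hunks further down are the mapping_set_error() conversion: the helper encapsulates exactly the open-coded test being deleted, recording -ENOSPC as AS_ENOSPC and any other nonzero error as AS_EIO, so a later fsync()/filemap_fdatawait() can report the failure to userspace. As of this series the helper boils down to:

    static inline void mapping_set_error(struct address_space *mapping,
                                         int error)
    {
            if (unlikely(error)) {
                    if (error == -ENOSPC)
                            set_bit(AS_ENOSPC, &mapping->flags);
                    else
                            set_bit(AS_EIO, &mapping->flags);
            }
    }
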
index a9d94f7cf683d54ecdb6b5e7b4e7c1a8cfa3f241..2675792a8f5963a37b82d708b0ce87f8f070d5dd 100644 (file)
@@ -708,7 +708,7 @@ static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
 {
        struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
 
-       queue_kthread_work(&s->kworker, &s->irq_work);
+       kthread_queue_work(&s->kworker, &s->irq_work);
 
        return IRQ_HANDLED;
 }
@@ -784,7 +784,7 @@ static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
 
        one->config.flags |= SC16IS7XX_RECONF_IER;
        one->config.ier_clear |= bit;
-       queue_kthread_work(&s->kworker, &one->reg_work);
+       kthread_queue_work(&s->kworker, &one->reg_work);
 }
 
 static void sc16is7xx_stop_tx(struct uart_port *port)
@@ -802,7 +802,7 @@ static void sc16is7xx_start_tx(struct uart_port *port)
        struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
        struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
-       queue_kthread_work(&s->kworker, &one->tx_work);
+       kthread_queue_work(&s->kworker, &one->tx_work);
 }
 
 static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
@@ -828,7 +828,7 @@ static void sc16is7xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
        struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
 
        one->config.flags |= SC16IS7XX_RECONF_MD;
-       queue_kthread_work(&s->kworker, &one->reg_work);
+       kthread_queue_work(&s->kworker, &one->reg_work);
 }
 
 static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
@@ -957,7 +957,7 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
 
        port->rs485 = *rs485;
        one->config.flags |= SC16IS7XX_RECONF_RS485;
-       queue_kthread_work(&s->kworker, &one->reg_work);
+       kthread_queue_work(&s->kworker, &one->reg_work);
 
        return 0;
 }
@@ -1030,7 +1030,7 @@ static void sc16is7xx_shutdown(struct uart_port *port)
 
        sc16is7xx_power(port, 0);
 
-       flush_kthread_worker(&s->kworker);
+       kthread_flush_worker(&s->kworker);
 }
 
 static const char *sc16is7xx_type(struct uart_port *port)
@@ -1176,8 +1176,8 @@ static int sc16is7xx_probe(struct device *dev,
        s->devtype = devtype;
        dev_set_drvdata(dev, s);
 
-       init_kthread_worker(&s->kworker);
-       init_kthread_work(&s->irq_work, sc16is7xx_ist);
+       kthread_init_worker(&s->kworker);
+       kthread_init_work(&s->irq_work, sc16is7xx_ist);
        s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
                                      "sc16is7xx");
        if (IS_ERR(s->kworker_task)) {
@@ -1234,8 +1234,8 @@ static int sc16is7xx_probe(struct device *dev,
                                     SC16IS7XX_EFCR_RXDISABLE_BIT |
                                     SC16IS7XX_EFCR_TXDISABLE_BIT);
                /* Initialize kthread work structs */
-               init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
-               init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
+               kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
+               kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
                /* Register port */
                uart_add_one_port(&sc16is7xx_uart, &s->p[i].port);
 
@@ -1301,7 +1301,7 @@ static int sc16is7xx_remove(struct device *dev)
                sc16is7xx_power(&s->p[i].port, 0);
        }
 
-       flush_kthread_worker(&s->kworker);
+       kthread_flush_worker(&s->kworker);
        kthread_stop(s->kworker_task);
 
        if (!IS_ERR(s->clk))
index 12731e67d2c7a01a9a6198129b39d0d684007556..ea73afb026d888f6202fa7dd17ee4c8a9583227f 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/usb/ehci_def.h>
 #include <linux/delay.h>
 #include <linux/serial_core.h>
-#include <linux/kconfig.h>
 #include <linux/kgdb.h>
 #include <linux/kthread.h>
 #include <asm/io.h>
index f5fccb3e415246038ebc7ac89a421786452d5862..f78503203f42378569baeb82d856ba01a6bc80e3 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/module.h>
index 35af362534400c8645c0c0b9a21f8be4b6536061..d793f548dfe26aef387d13b703454abe67beb5be 100644 (file)
@@ -9,7 +9,6 @@
  */
 
 #include <linux/types.h>
-#include <linux/kconfig.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
index 14d506efd1aaabebd33d4e08a3891e001da2b80f..f865c3f05bea5074a91a547e4682523e8aab88be 100644 (file)
@@ -398,8 +398,7 @@ no_more:
                switch (ret) {
                case -EDQUOT:
                case -ENOSPC:
-                       set_bit(AS_ENOSPC,
-                               &wb->vnode->vfs_inode.i_mapping->flags);
+                       mapping_set_error(wb->vnode->vfs_inode.i_mapping, -ENOSPC);
                        break;
                case -EROFS:
                case -EIO:
@@ -409,7 +408,7 @@ no_more:
                case -ENOMEDIUM:
                case -ENXIO:
                        afs_kill_pages(wb->vnode, true, first, last);
-                       set_bit(AS_EIO, &wb->vnode->vfs_inode.i_mapping->flags);
+                       mapping_set_error(wb->vnode->vfs_inode.i_mapping, -EIO);
                        break;
                case -EACCES:
                case -EPERM:
index a439548de785dde9497ed13b2063b696ed773d7f..a1fba4285277dde3af647d778b8aefd4c36d2261 100644 (file)
@@ -20,7 +20,8 @@
 #define AUTOFS_IOC_COUNT     32
 
 #define AUTOFS_DEV_IOCTL_IOC_FIRST     (AUTOFS_DEV_IOCTL_VERSION)
-#define AUTOFS_DEV_IOCTL_IOC_COUNT     (AUTOFS_IOC_COUNT - 11)
+#define AUTOFS_DEV_IOCTL_IOC_COUNT \
+       (AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD - AUTOFS_DEV_IOCTL_VERSION_CMD)
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -33,8 +34,6 @@
 #include <asm/current.h>
 #include <linux/uaccess.h>
 
-/* #define DEBUG */
-
 #ifdef pr_fmt
 #undef pr_fmt
 #endif
@@ -111,8 +110,6 @@ struct autofs_sb_info {
        int max_proto;
        unsigned long exp_timeout;
        unsigned int type;
-       int reghost_enabled;
-       int needs_reghost;
        struct super_block *sb;
        struct mutex wq_mutex;
        struct mutex pipe_mutex;
@@ -271,4 +268,4 @@ static inline void autofs4_del_expiring(struct dentry *dentry)
        }
 }
 
-extern void autofs4_kill_sb(struct super_block *);
+void autofs4_kill_sb(struct super_block *);
index c7fcc743884374cf695f530036701610d78d79a7..fc09eb77ddf37a4ae27af402553220e4287af92e 100644 (file)
@@ -75,7 +75,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
        if ((param->ver_major != AUTOFS_DEV_IOCTL_VERSION_MAJOR) ||
            (param->ver_minor > AUTOFS_DEV_IOCTL_VERSION_MINOR)) {
                pr_warn("ioctl control interface version mismatch: "
-                       "kernel(%u.%u), user(%u.%u), cmd(%d)\n",
+                       "kernel(%u.%u), user(%u.%u), cmd(0x%08x)\n",
                        AUTOFS_DEV_IOCTL_VERSION_MAJOR,
                        AUTOFS_DEV_IOCTL_VERSION_MINOR,
                        param->ver_major, param->ver_minor, cmd);
@@ -172,6 +172,17 @@ static struct autofs_sb_info *autofs_dev_ioctl_sbi(struct file *f)
        return sbi;
 }
 
+/* Return autofs dev ioctl version */
+static int autofs_dev_ioctl_version(struct file *fp,
+                                   struct autofs_sb_info *sbi,
+                                   struct autofs_dev_ioctl *param)
+{
+       /* This should have already been set. */
+       param->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
+       param->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
+       return 0;
+}
+
 /* Return autofs module protocol version */
 static int autofs_dev_ioctl_protover(struct file *fp,
                                     struct autofs_sb_info *sbi,
@@ -586,41 +597,25 @@ out:
 
 static ioctl_fn lookup_dev_ioctl(unsigned int cmd)
 {
-       static struct {
-               int cmd;
-               ioctl_fn fn;
-       } _ioctls[] = {
-               {cmd_idx(AUTOFS_DEV_IOCTL_VERSION_CMD), NULL},
-               {cmd_idx(AUTOFS_DEV_IOCTL_PROTOVER_CMD),
-                        autofs_dev_ioctl_protover},
-               {cmd_idx(AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD),
-                        autofs_dev_ioctl_protosubver},
-               {cmd_idx(AUTOFS_DEV_IOCTL_OPENMOUNT_CMD),
-                        autofs_dev_ioctl_openmount},
-               {cmd_idx(AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD),
-                        autofs_dev_ioctl_closemount},
-               {cmd_idx(AUTOFS_DEV_IOCTL_READY_CMD),
-                        autofs_dev_ioctl_ready},
-               {cmd_idx(AUTOFS_DEV_IOCTL_FAIL_CMD),
-                        autofs_dev_ioctl_fail},
-               {cmd_idx(AUTOFS_DEV_IOCTL_SETPIPEFD_CMD),
-                        autofs_dev_ioctl_setpipefd},
-               {cmd_idx(AUTOFS_DEV_IOCTL_CATATONIC_CMD),
-                        autofs_dev_ioctl_catatonic},
-               {cmd_idx(AUTOFS_DEV_IOCTL_TIMEOUT_CMD),
-                        autofs_dev_ioctl_timeout},
-               {cmd_idx(AUTOFS_DEV_IOCTL_REQUESTER_CMD),
-                        autofs_dev_ioctl_requester},
-               {cmd_idx(AUTOFS_DEV_IOCTL_EXPIRE_CMD),
-                        autofs_dev_ioctl_expire},
-               {cmd_idx(AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD),
-                        autofs_dev_ioctl_askumount},
-               {cmd_idx(AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD),
-                        autofs_dev_ioctl_ismountpoint}
+       static ioctl_fn _ioctls[] = {
+               autofs_dev_ioctl_version,
+               autofs_dev_ioctl_protover,
+               autofs_dev_ioctl_protosubver,
+               autofs_dev_ioctl_openmount,
+               autofs_dev_ioctl_closemount,
+               autofs_dev_ioctl_ready,
+               autofs_dev_ioctl_fail,
+               autofs_dev_ioctl_setpipefd,
+               autofs_dev_ioctl_catatonic,
+               autofs_dev_ioctl_timeout,
+               autofs_dev_ioctl_requester,
+               autofs_dev_ioctl_expire,
+               autofs_dev_ioctl_askumount,
+               autofs_dev_ioctl_ismountpoint,
        };
        unsigned int idx = cmd_idx(cmd);
 
-       return (idx >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[idx].fn;
+       return (idx >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[idx];
 }
 
 /* ioctl dispatcher */
@@ -642,7 +637,7 @@ static int _autofs_dev_ioctl(unsigned int command,
        cmd = _IOC_NR(command);
 
        if (_IOC_TYPE(command) != _IOC_TYPE(AUTOFS_DEV_IOCTL_IOC_FIRST) ||
-           cmd - cmd_first >= AUTOFS_DEV_IOCTL_IOC_COUNT) {
+           cmd - cmd_first > AUTOFS_DEV_IOCTL_IOC_COUNT) {
                return -ENOTTY;
        }
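
Note the paired arithmetic: AUTOFS_DEV_IOCTL_IOC_COUNT is now defined as last-minus-first (see the autofs_i.h hunk above), so with 14 dense command numbers the valid zero-based indices run from 0 through COUNT inclusive, and the range check flips from >= to >. Spelled out:

    /* 14 commands, FIRST..LAST; COUNT == LAST - FIRST == 13 */
    unsigned int idx = cmd - cmd_first;    /* valid: 0 .. 13 */

    if (idx > AUTOFS_DEV_IOCTL_IOC_COUNT)  /* i.e. idx >= 14: reject */
            return -ENOTTY;
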
 
@@ -655,14 +650,11 @@ static int _autofs_dev_ioctl(unsigned int command,
        if (err)
                goto out;
 
-       /* The validate routine above always sets the version */
-       if (cmd == AUTOFS_DEV_IOCTL_VERSION_CMD)
-               goto done;
-
        fn = lookup_dev_ioctl(cmd);
        if (!fn) {
                pr_warn("unknown command 0x%08x\n", command);
-               return -ENOTTY;
+               err = -ENOTTY;
+               goto out;
        }
 
        fp = NULL;
@@ -671,9 +663,11 @@ static int _autofs_dev_ioctl(unsigned int command,
        /*
         * For obvious reasons the openmount can't have a file
         * descriptor yet. We don't take a reference to the
-        * file during close to allow for immediate release.
+        * file during close to allow for immediate release,
+        * and the same for retrieving ioctl version.
         */
-       if (cmd != AUTOFS_DEV_IOCTL_OPENMOUNT_CMD &&
+       if (cmd != AUTOFS_DEV_IOCTL_VERSION_CMD &&
+           cmd != AUTOFS_DEV_IOCTL_OPENMOUNT_CMD &&
            cmd != AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD) {
                fp = fget(param->ioctlfd);
                if (!fp) {
@@ -706,7 +700,6 @@ cont:
 
        if (fp)
                fput(fp);
-done:
        if (err >= 0 && copy_to_user(user, param, AUTOFS_DEV_IOCTL_SIZE))
                err = -EFAULT;
 out:
index ca9cbd6362e00144d0d3f2b59c9bbd85933e663e..438b5bf675b6a2a65651b4b06a397865d9d94af1 100644 (file)
@@ -274,6 +274,23 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
                goto fail_dput;
        }
 
+       /* Test versions first */
+       if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
+           sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
+               pr_err("kernel does not match daemon version "
+                      "daemon (%d, %d) kernel (%d, %d)\n",
+                      sbi->min_proto, sbi->max_proto,
+                      AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
+               goto fail_dput;
+       }
+
+       /* Establish highest kernel protocol version */
+       if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION)
+               sbi->version = AUTOFS_MAX_PROTO_VERSION;
+       else
+               sbi->version = sbi->max_proto;
+       sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
+
        if (pgrp_set) {
                sbi->oz_pgrp = find_get_pid(pgrp);
                if (!sbi->oz_pgrp) {
@@ -291,29 +308,12 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
        root_inode->i_fop = &autofs4_root_operations;
        root_inode->i_op = &autofs4_dir_inode_operations;
 
-       /* Couldn't this be tested earlier? */
-       if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION ||
-           sbi->min_proto > AUTOFS_MAX_PROTO_VERSION) {
-               pr_err("kernel does not match daemon version "
-                      "daemon (%d, %d) kernel (%d, %d)\n",
-                      sbi->min_proto, sbi->max_proto,
-                      AUTOFS_MIN_PROTO_VERSION, AUTOFS_MAX_PROTO_VERSION);
-               goto fail_dput;
-       }
-
-       /* Establish highest kernel protocol version */
-       if (sbi->max_proto > AUTOFS_MAX_PROTO_VERSION)
-               sbi->version = AUTOFS_MAX_PROTO_VERSION;
-       else
-               sbi->version = sbi->max_proto;
-       sbi->sub_version = AUTOFS_PROTO_SUBVERSION;
-
        pr_debug("pipe fd = %d, pgrp = %u\n", pipefd, pid_nr(sbi->oz_pgrp));
        pipe = fget(pipefd);
 
        if (!pipe) {
                pr_err("could not open pipe file descriptor\n");
-               goto fail_dput;
+               goto fail_put_pid;
        }
        ret = autofs_prepare_pipe(pipe);
        if (ret < 0)
@@ -334,14 +334,14 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
 fail_fput:
        pr_err("pipe file descriptor does not contain proper ops\n");
        fput(pipe);
-       /* fall through */
+fail_put_pid:
+       put_pid(sbi->oz_pgrp);
 fail_dput:
        dput(root);
        goto fail_free;
 fail_ino:
-       kfree(ino);
+       autofs4_free_ino(ino);
 fail_free:
-       put_pid(sbi->oz_pgrp);
        kfree(sbi);
        s->s_fs_info = NULL;
        return ret;
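
The fill_super() reshuffle is error-path hygiene: the protocol-version check moves before the oz_pgrp pid reference is taken, a dedicated fail_put_pid label drops that reference, and fail_free no longer puts a pid that was never acquired (it also swaps the bare kfree(ino) for autofs4_free_ino()). The invariant being restored is the usual goto-ladder rule — each label undoes exactly the steps that succeeded before the jump — sketched generically:

    a = acquire_a();
    if (!a)
            goto fail;              /* nothing to undo yet */
    b = acquire_b();
    if (!b)
            goto fail_release_a;    /* only a needs undoing */
    return 0;

    fail_release_a:
            release_a(a);
    fail:
            return -ENOMEM;
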
@@ -368,7 +368,8 @@ struct inode *autofs4_get_inode(struct super_block *sb, umode_t mode)
                inode->i_fop = &autofs4_dir_operations;
        } else if (S_ISLNK(mode)) {
                inode->i_op = &autofs4_symlink_inode_operations;
-       }
+       } else
+               WARN_ON(1);
 
        return inode;
 }
index 623510e84c968c8e8cc2f34c23d1b897f75246b7..a11f7317487773617dd585197e7482f5e22a3045 100644 (file)
@@ -577,8 +577,6 @@ static int autofs4_dir_symlink(struct inode *dir,
        inode = autofs4_get_inode(dir->i_sb, S_IFLNK | 0555);
        if (!inode) {
                kfree(cp);
-               if (!dentry->d_fsdata)
-                       kfree(ino);
                return -ENOMEM;
        }
        inode->i_private = cp;
@@ -842,7 +840,7 @@ static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p)
        if (may_umount(mnt))
                status = 1;
 
-       pr_debug("returning %d\n", status);
+       pr_debug("may umount %d\n", status);
 
        status = put_user(status, p);
 
index 376e4e42632416dd544a1d3ca13048bcd8f85358..05b553368bb4f1a6cd10c8adc6c07b2f92c5f3b4 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/cleancache.h>
 #include <linux/dax.h>
 #include <linux/badblocks.h>
+#include <linux/falloc.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -1775,6 +1776,81 @@ static const struct address_space_operations def_blk_aops = {
        .is_dirty_writeback = buffer_check_dirty_writeback,
 };
 
+#define        BLKDEV_FALLOC_FL_SUPPORTED                                      \
+               (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
+                FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
+
+static long blkdev_fallocate(struct file *file, int mode, loff_t start,
+                            loff_t len)
+{
+       struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+       struct request_queue *q = bdev_get_queue(bdev);
+       struct address_space *mapping;
+       loff_t end = start + len - 1;
+       loff_t isize;
+       int error;
+
+       /* Fail if we don't recognize the flags. */
+       if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
+               return -EOPNOTSUPP;
+
+       /* Don't go off the end of the device. */
+       isize = i_size_read(bdev->bd_inode);
+       if (start >= isize)
+               return -EINVAL;
+       if (end >= isize) {
+               if (mode & FALLOC_FL_KEEP_SIZE) {
+                       len = isize - start;
+                       end = start + len - 1;
+               } else
+                       return -EINVAL;
+       }
+
+       /*
+        * Don't allow IO that isn't aligned to logical block size.
+        */
+       if ((start | len) & (bdev_logical_block_size(bdev) - 1))
+               return -EINVAL;
+
+       /* Invalidate the page cache, including dirty pages. */
+       mapping = bdev->bd_inode->i_mapping;
+       truncate_inode_pages_range(mapping, start, end);
+
+       switch (mode) {
+       case FALLOC_FL_ZERO_RANGE:
+       case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
+               error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
+                                           GFP_KERNEL, false);
+               break;
+       case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
+               /* Only punch if the device can do zeroing discard. */
+               if (!blk_queue_discard(q) || !q->limits.discard_zeroes_data)
+                       return -EOPNOTSUPP;
+               error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
+                                            GFP_KERNEL, 0);
+               break;
+       case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
+               if (!blk_queue_discard(q))
+                       return -EOPNOTSUPP;
+               error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
+                                            GFP_KERNEL, 0);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       if (error)
+               return error;
+
+       /*
+        * Invalidate again; if someone wandered in and dirtied a page,
+        * the caller will be given -EBUSY.  The third argument is
+        * inclusive, so the rounding here is safe.
+        */
+       return invalidate_inode_pages2_range(mapping,
+                                            start >> PAGE_SHIFT,
+                                            end >> PAGE_SHIFT);
+}
+
 const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
@@ -1789,6 +1865,7 @@ const struct file_operations def_blk_fops = {
 #endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
+       .fallocate      = blkdev_fallocate,
 };
 
 int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
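
With .fallocate wired into def_blk_fops (and vfs_fallocate() taught to accept S_ISBLK inodes in the fs/open.c hunk below), userspace can zero ranges and punch holes on a raw block device. Per the switch in blkdev_fallocate(), punching requires a queue that supports discard — and, unless FALLOC_FL_NO_HIDE_STALE is passed, discard that zeroes data. An illustrative userspace call; the device path is a placeholder:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/falloc.h>
    #include <stdio.h>

    int main(void)
    {
            int fd = open("/dev/sdX", O_RDWR);

            if (fd < 0)
                    return 1;
            /* start and len must be logical-block aligned;
             * KEEP_SIZE is mandatory when punching on a bdev */
            if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                          0, 1 << 20))
                    perror("fallocate");
            return 0;
    }
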
index 7dad8713fac817e60d613ec55fede7ddca6f2192..b205a629001df1bc936a13aef23fcdbeba04e89d 100644 (file)
@@ -351,7 +351,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                set_buffer_uptodate(bh);
        } else {
                buffer_io_error(bh, ", lost async page write");
-               set_bit(AS_EIO, &page->mapping->flags);
+               mapping_set_error(page->mapping, -EIO);
                set_buffer_write_io_error(bh);
                clear_buffer_uptodate(bh);
                SetPageError(page);
@@ -3249,7 +3249,7 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
        bh = head;
        do {
                if (buffer_write_io_error(bh) && page->mapping)
-                       set_bit(AS_EIO, &page->mapping->flags);
+                       mapping_set_error(page->mapping, -EIO);
                if (buffer_busy(bh))
                        goto failed;
                bh = bh->b_this_page;
index d42ff527ab217f456f4ec4f17e13386320f1d8fc..d8072bc074a44f74c09c47c2905eb0d1c7ac87dc 100644 (file)
@@ -778,7 +778,7 @@ try_again:
 fail:
        EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
                     inode->i_ino, page->index, ret);
-       set_bit(AS_EIO, &page->mapping->flags);
+       mapping_set_error(page->mapping, -EIO);
        unlock_page(page);
        return ret;
 }
index b4cbee936cf8a59808abaed876730671d4715d65..0094923e5ebf56a6a65198cf5a7040078b73240a 100644 (file)
@@ -88,7 +88,7 @@ static void ext4_finish_bio(struct bio *bio)
 
                if (bio->bi_error) {
                        SetPageError(page);
-                       set_bit(AS_EIO, &page->mapping->flags);
+                       mapping_set_error(page->mapping, -EIO);
                }
                bh = head = page_buffers(page);
                /*
index 0d0177c9149c191d7fe64ebb9ff55d7ee03476e4..9ae194fd2fdb81cbc54bfed9220da527d6b21a89 100644 (file)
@@ -75,7 +75,7 @@ static void f2fs_write_end_io(struct bio *bio)
                fscrypt_pullback_bio_page(&page, true);
 
                if (unlikely(bio->bi_error)) {
-                       set_bit(AS_EIO, &page->mapping->flags);
+                       mapping_set_error(page->mapping, -EIO);
                        f2fs_stop_checkpoint(sbi, true);
                }
                end_page_writeback(page);
index 5bb565f9989ccfde3ba9726e1d39285e5a408d60..31f8ca0466392eef588a2e61fbc5b34b132c5345 100644 (file)
@@ -269,8 +269,7 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
                         * filemap_fdatawait_range(), set it again so
                         * that user process can get -EIO from fsync().
                         */
-                       set_bit(AS_EIO,
-                               &jinode->i_vfs_inode->i_mapping->flags);
+                       mapping_set_error(jinode->i_vfs_inode->i_mapping, -EIO);
 
                        if (!ret)
                                ret = err;
index 2257a13110275272b346eda6c734942ad806b265..184a15edd18d7426c5ac77903d283790d1462a50 100644 (file)
@@ -6,8 +6,6 @@
 #ifndef _LOCKD_PROCFS_H
 #define _LOCKD_PROCFS_H
 
-#include <linux/kconfig.h>
-
 #if IS_ENABLED(CONFIG_PROC_FS)
 int lockd_create_procfs(void);
 void lockd_remove_procfs(void);
index 6ea06f8a7d295acd45e24f36a8fd50046897d78a..3f828a187049909070c0f3934f4b10a5a6ee5d3e 100644 (file)
@@ -3188,6 +3188,9 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
                                    migrate->new_master,
                                    migrate->master);
 
+       if (ret < 0)
+               kmem_cache_free(dlm_mle_cache, mle);
+
        spin_unlock(&dlm->master_lock);
 unlock:
        spin_unlock(&dlm->spinlock);
index 8aeb08bb278bd92978a823ec63a2c9593bfe7460..a7719cfb7257e9b19b2ff50efeb5366f98f4d24b 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -300,7 +300,8 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
         * Let individual file system decide if it supports preallocation
         * for directories or not.
         */
-       if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
+       if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) &&
+           !S_ISBLK(inode->i_mode))
                return -ENODEV;
 
        /* Check for wrap through zero too */
index 1f559f0608e1bb1f186446b4bbe27f9bb3439e9d..8e0d9f26dfadc4b5849a9f46a933a89ea2aa49ca 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -601,54 +601,63 @@ pipe_fasync(int fd, struct file *filp, int on)
        return retval;
 }
 
-static void account_pipe_buffers(struct pipe_inode_info *pipe,
+static unsigned long account_pipe_buffers(struct user_struct *user,
                                  unsigned long old, unsigned long new)
 {
-       atomic_long_add(new - old, &pipe->user->pipe_bufs);
+       return atomic_long_add_return(new - old, &user->pipe_bufs);
 }
 
-static bool too_many_pipe_buffers_soft(struct user_struct *user)
+static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
 {
-       return pipe_user_pages_soft &&
-              atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
+       return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft;
 }
 
-static bool too_many_pipe_buffers_hard(struct user_struct *user)
+static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
 {
-       return pipe_user_pages_hard &&
-              atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
+       return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard;
 }
 
 struct pipe_inode_info *alloc_pipe_info(void)
 {
        struct pipe_inode_info *pipe;
+       unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
+       struct user_struct *user = get_current_user();
+       unsigned long user_bufs;
 
        pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
-       if (pipe) {
-               unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
-               struct user_struct *user = get_current_user();
-
-               if (!too_many_pipe_buffers_hard(user)) {
-                       if (too_many_pipe_buffers_soft(user))
-                               pipe_bufs = 1;
-                       pipe->bufs = kcalloc(pipe_bufs,
-                                            sizeof(struct pipe_buffer),
-                                            GFP_KERNEL_ACCOUNT);
-               }
+       if (pipe == NULL)
+               goto out_free_uid;
 
-               if (pipe->bufs) {
-                       init_waitqueue_head(&pipe->wait);
-                       pipe->r_counter = pipe->w_counter = 1;
-                       pipe->buffers = pipe_bufs;
-                       pipe->user = user;
-                       account_pipe_buffers(pipe, 0, pipe_bufs);
-                       mutex_init(&pipe->mutex);
-                       return pipe;
-               }
-               free_uid(user);
-               kfree(pipe);
+       if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
+               pipe_bufs = pipe_max_size >> PAGE_SHIFT;
+
+       user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
+
+       if (too_many_pipe_buffers_soft(user_bufs)) {
+               user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
+               pipe_bufs = 1;
+       }
+
+       if (too_many_pipe_buffers_hard(user_bufs))
+               goto out_revert_acct;
+
+       pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
+                            GFP_KERNEL_ACCOUNT);
+
+       if (pipe->bufs) {
+               init_waitqueue_head(&pipe->wait);
+               pipe->r_counter = pipe->w_counter = 1;
+               pipe->buffers = pipe_bufs;
+               pipe->user = user;
+               mutex_init(&pipe->mutex);
+               return pipe;
        }
 
+out_revert_acct:
+       (void) account_pipe_buffers(user, pipe_bufs, 0);
+       kfree(pipe);
+out_free_uid:
+       free_uid(user);
        return NULL;
 }
 
@@ -656,7 +665,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
 {
        int i;
 
-       account_pipe_buffers(pipe, pipe->buffers, 0);
+       (void) account_pipe_buffers(pipe->user, pipe->buffers, 0);
        free_uid(pipe->user);
        for (i = 0; i < pipe->buffers; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;
@@ -1007,13 +1016,55 @@ const struct file_operations pipefifo_fops = {
        .fasync         = pipe_fasync,
 };
 
+/*
+ * Currently we rely on the pipe array holding a power-of-2 number
+ * of pages.
+ */
+static inline unsigned int round_pipe_size(unsigned int size)
+{
+       unsigned long nr_pages;
+
+       nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
+}
+
 /*
  * Allocate a new array of pipe buffers and copy the info over. Returns the
  * pipe size if successful, or return -ERROR on error.
  */
-static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
 {
        struct pipe_buffer *bufs;
+       unsigned int size, nr_pages;
+       unsigned long user_bufs;
+       long ret = 0;
+
+       size = round_pipe_size(arg);
+       nr_pages = size >> PAGE_SHIFT;
+
+       if (!nr_pages)
+               return -EINVAL;
+
+       /*
+        * If trying to increase the pipe capacity, check that an
+        * unprivileged user is not trying to exceed various limits
+        * (soft limit check here, hard limit check just below).
+        * Decreasing the pipe capacity is always permitted, even
+        * if the user is currently over a limit.
+        */
+       if (nr_pages > pipe->buffers &&
+                       size > pipe_max_size && !capable(CAP_SYS_RESOURCE))
+               return -EPERM;
+
+       user_bufs = account_pipe_buffers(pipe->user, pipe->buffers, nr_pages);
+
+       if (nr_pages > pipe->buffers &&
+                       (too_many_pipe_buffers_hard(user_bufs) ||
+                        too_many_pipe_buffers_soft(user_bufs)) &&
+                       !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
+               ret = -EPERM;
+               goto out_revert_acct;
+       }
 
        /*
         * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
@@ -1021,13 +1072,17 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
         * again like we would do for growing. If the pipe currently
         * contains more buffers than arg, then return busy.
         */
-       if (nr_pages < pipe->nrbufs)
-               return -EBUSY;
+       if (nr_pages < pipe->nrbufs) {
+               ret = -EBUSY;
+               goto out_revert_acct;
+       }
 
        bufs = kcalloc(nr_pages, sizeof(*bufs),
                       GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
-       if (unlikely(!bufs))
-               return -ENOMEM;
+       if (unlikely(!bufs)) {
+               ret = -ENOMEM;
+               goto out_revert_acct;
+       }
 
        /*
         * The pipe array wraps around, so just start the new one at zero
@@ -1050,24 +1105,15 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
                        memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
        }
 
-       account_pipe_buffers(pipe, pipe->buffers, nr_pages);
        pipe->curbuf = 0;
        kfree(pipe->bufs);
        pipe->bufs = bufs;
        pipe->buffers = nr_pages;
        return nr_pages * PAGE_SIZE;
-}
-
-/*
- * Currently we rely on the pipe array holding a power-of-2 number
- * of pages.
- */
-static inline unsigned int round_pipe_size(unsigned int size)
-{
-       unsigned long nr_pages;
 
-       nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
+out_revert_acct:
+       (void) account_pipe_buffers(pipe->user, nr_pages, pipe->buffers);
+       return ret;
 }
 
 /*
@@ -1109,28 +1155,9 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
        __pipe_lock(pipe);
 
        switch (cmd) {
-       case F_SETPIPE_SZ: {
-               unsigned int size, nr_pages;
-
-               size = round_pipe_size(arg);
-               nr_pages = size >> PAGE_SHIFT;
-
-               ret = -EINVAL;
-               if (!nr_pages)
-                       goto out;
-
-               if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
-                       ret = -EPERM;
-                       goto out;
-               } else if ((too_many_pipe_buffers_hard(pipe->user) ||
-                           too_many_pipe_buffers_soft(pipe->user)) &&
-                          !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
-                       ret = -EPERM;
-                       goto out;
-               }
-               ret = pipe_set_size(pipe, nr_pages);
+       case F_SETPIPE_SZ:
+               ret = pipe_set_size(pipe, arg);
                break;
-               }
        case F_GETPIPE_SZ:
                ret = pipe->buffers * PAGE_SIZE;
                break;
@@ -1139,7 +1166,6 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
                break;
        }
 
-out:
        __pipe_unlock(pipe);
        return ret;
 }
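
A quick user-space sketch of the reworked path: the requested size is rounded up to a power-of-two number of pages by round_pipe_size(), and all of the capability and limit checks now live inside pipe_set_size(). Hypothetical test program, not part of this series:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fds[2];
        long sz;

        if (pipe(fds) < 0)
                return 1;

        /* Ask for 3 pages; the kernel rounds up to 4 (a power of two). */
        sz = fcntl(fds[0], F_SETPIPE_SZ, 3 * 4096);
        if (sz < 0)
                perror("F_SETPIPE_SZ");  /* EPERM or EBUSY per the checks above */
        else
                printf("granted %ld bytes\n", sz);

        /* F_GETPIPE_SZ reports pipe->buffers * PAGE_SIZE. */
        printf("current: %ld bytes\n", (long)fcntl(fds[0], F_GETPIPE_SZ));
        return 0;
}
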
index 8ed9da50896a156a463ae4d0c657e566809b09a3..3d4f85defeab043998a6b6b4e03264386481e601 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/sched/rt.h>
 #include <linux/freezer.h>
 #include <net/busy_poll.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
 
@@ -554,7 +555,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
        fd_set_bits fds;
        void *bits;
        int ret, max_fds;
-       unsigned int size;
+       size_t size, alloc_size;
        struct fdtable *fdt;
        /* Allocate small arguments on the stack to save memory and be faster */
        long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
@@ -581,7 +582,14 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
        if (size > sizeof(stack_fds) / 6) {
                /* Not enough space in on-stack array; must use kmalloc */
                ret = -ENOMEM;
-               bits = kmalloc(6 * size, GFP_KERNEL);
+               if (size > (SIZE_MAX / 6))
+                       goto out_nofds;
+
+               alloc_size = 6 * size;
+               bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
+               if (!bits && alloc_size > PAGE_SIZE)
+                       bits = vmalloc(alloc_size);
+
                if (!bits)
                        goto out_nofds;
        }
@@ -618,7 +626,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
 
 out:
        if (bits != stack_fds)
-               kfree(bits);
+               kvfree(bits);
 out_nofds:
        return ret;
 }
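
The allocation change above is the usual kmalloc-then-vmalloc fallback; a distilled sketch of the pattern (hypothetical helper name, the real code open-codes it):

static void *fd_bits_alloc(size_t alloc_size)
{
        void *bits;

        /* Try the slab allocator first, quietly: large requests may fail. */
        bits = kmalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN);
        if (!bits && alloc_size > PAGE_SIZE)
                bits = vmalloc(alloc_size);  /* virtually contiguous fallback */
        return bits;  /* release with kvfree(), which handles both cases */
}
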
index 7caaf298f5399c90616838ccaf1f904066cb29e4..28c15050ebe606e3c6105ee994264de759082e49 100644 (file)
 #ifndef _LINUX_AUTO_DEV_IOCTL_H
 #define _LINUX_AUTO_DEV_IOCTL_H
 
-#include <linux/auto_fs.h>
-#include <linux/string.h>
-
-#define AUTOFS_DEVICE_NAME             "autofs"
-
-#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
-#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
-
-#define AUTOFS_DEVID_LEN               16
-
-#define AUTOFS_DEV_IOCTL_SIZE          sizeof(struct autofs_dev_ioctl)
-
-/*
- * An ioctl interface for autofs mount point control.
- */
-
-struct args_protover {
-       __u32   version;
-};
-
-struct args_protosubver {
-       __u32   sub_version;
-};
-
-struct args_openmount {
-       __u32   devid;
-};
-
-struct args_ready {
-       __u32   token;
-};
-
-struct args_fail {
-       __u32   token;
-       __s32   status;
-};
-
-struct args_setpipefd {
-       __s32   pipefd;
-};
-
-struct args_timeout {
-       __u64   timeout;
-};
-
-struct args_requester {
-       __u32   uid;
-       __u32   gid;
-};
-
-struct args_expire {
-       __u32   how;
-};
-
-struct args_askumount {
-       __u32   may_umount;
-};
-
-struct args_ismountpoint {
-       union {
-               struct args_in {
-                       __u32   type;
-               } in;
-               struct args_out {
-                       __u32   devid;
-                       __u32   magic;
-               } out;
-       };
-};
-
-/*
- * All the ioctls use this structure.
- * When sending a path size must account for the total length
- * of the chunk of memory otherwise is is the size of the
- * structure.
- */
-
-struct autofs_dev_ioctl {
-       __u32 ver_major;
-       __u32 ver_minor;
-       __u32 size;             /* total size of data passed in
-                                * including this struct */
-       __s32 ioctlfd;          /* automount command fd */
-
-       /* Command parameters */
-
-       union {
-               struct args_protover            protover;
-               struct args_protosubver         protosubver;
-               struct args_openmount           openmount;
-               struct args_ready               ready;
-               struct args_fail                fail;
-               struct args_setpipefd           setpipefd;
-               struct args_timeout             timeout;
-               struct args_requester           requester;
-               struct args_expire              expire;
-               struct args_askumount           askumount;
-               struct args_ismountpoint        ismountpoint;
-       };
-
-       char path[0];
-};
-
-static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
-{
-       memset(in, 0, sizeof(struct autofs_dev_ioctl));
-       in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
-       in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
-       in->size = sizeof(struct autofs_dev_ioctl);
-       in->ioctlfd = -1;
-}
-
-/*
- * If you change this make sure you make the corresponding change
- * to autofs-dev-ioctl.c:lookup_ioctl()
- */
-enum {
-       /* Get various version info */
-       AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
-       AUTOFS_DEV_IOCTL_PROTOVER_CMD,
-       AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
-
-       /* Open mount ioctl fd */
-       AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
-
-       /* Close mount ioctl fd */
-       AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
-
-       /* Mount/expire status returns */
-       AUTOFS_DEV_IOCTL_READY_CMD,
-       AUTOFS_DEV_IOCTL_FAIL_CMD,
-
-       /* Activate/deactivate autofs mount */
-       AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
-       AUTOFS_DEV_IOCTL_CATATONIC_CMD,
-
-       /* Expiry timeout */
-       AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
-
-       /* Get mount last requesting uid and gid */
-       AUTOFS_DEV_IOCTL_REQUESTER_CMD,
-
-       /* Check for eligible expire candidates */
-       AUTOFS_DEV_IOCTL_EXPIRE_CMD,
-
-       /* Request busy status */
-       AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
-
-       /* Check if path is a mountpoint */
-       AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
-};
-
-#define AUTOFS_IOCTL 0x93
-
-#define AUTOFS_DEV_IOCTL_VERSION \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_PROTOVER \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_OPENMOUNT \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_READY \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_FAIL \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_SETPIPEFD \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_CATATONIC \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_TIMEOUT \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_REQUESTER \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_EXPIRE \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
-
-#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
-       _IOWR(AUTOFS_IOCTL, \
-             AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
-
+#include <uapi/linux/auto_dev-ioctl.h>
 #endif /* _LINUX_AUTO_DEV_IOCTL_H */
index b4066bb890836c88be9b41f30de83f3d809114db..b8f814c95cf5108df246902b86e4d06072c1c70e 100644 (file)
@@ -10,7 +10,6 @@
 #define _LINUX_AUTO_FS_H
 
 #include <linux/fs.h>
-#include <linux/limits.h>
 #include <linux/ioctl.h>
 #include <uapi/linux/auto_fs.h>
 #endif /* _LINUX_AUTO_FS_H */
index 653589e3e30e8b03dd1844011bae7ca19e41136b..f13e4ff6835aed2e746792340590995a53f010b8 100644 (file)
@@ -22,7 +22,10 @@ extern const unsigned char _ctype[];
 #define isalnum(c)     ((__ismask(c)&(_U|_L|_D)) != 0)
 #define isalpha(c)     ((__ismask(c)&(_U|_L)) != 0)
 #define iscntrl(c)     ((__ismask(c)&(_C)) != 0)
-#define isdigit(c)     ((__ismask(c)&(_D)) != 0)
+static inline int isdigit(int c)
+{
+       return '0' <= c && c <= '9';
+}
 #define isgraph(c)     ((__ismask(c)&(_P|_U|_L|_D)) != 0)
 #define islower(c)     ((__ismask(c)&(_L)) != 0)
 #define isprint(c)     ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
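
The gain from the inline is that the range comparison needs no _ctype[] table load, so the compiler can fold isdigit() on constants and keep hot loops free of memory accesses. A small hypothetical caller:

static int count_digits(const char *s)
{
        int n = 0;

        while (*s)
                if (isdigit(*s++))  /* two compares, no table lookup */
                        n++;
        return n;
}
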
index 0f90eb5e3c6b95e54add6bde8c91bbe4ae70b73e..08528afdf58b31ee0e0d584161f4a685da69b174 100644 (file)
  * that gives better TLB efficiency.
  */
 #define DMA_ATTR_ALLOC_SINGLE_PAGES    (1UL << 7)
+/*
+ * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
+ * allocation failure reports (similarly to __GFP_NOWARN).
+ */
+#define DMA_ATTR_NO_WARN       (1UL << 8)
 
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.
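
A sketch of how a driver might pass the new attribute, assuming the unsigned long attrs calling convention of dma_alloc_attrs() (hypothetical dev and size):

static void *quiet_dma_alloc(struct device *dev, size_t size,
                             dma_addr_t *dma)
{
        /* Suppress allocation-failure reports for this attempt only,
         * analogous to __GFP_NOWARN for plain page allocations. */
        return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
                               DMA_ATTR_NO_WARN);
}
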
index c565f87f005e9321d8198ba2d353f3447bcc5d84..d7df4922da1d08843253086b144199fd299fd1c5 100644 (file)
@@ -78,7 +78,6 @@ extern struct module __this_module;
 
 #elif defined(CONFIG_TRIM_UNUSED_KSYMS)
 
-#include <linux/kconfig.h>
 #include <generated/autoksyms.h>
 
 #define __EXPORT_SYMBOL(sym, sec)                              \
index c145219286a8bfff3ab7f3f6171fc519aed111a5..bc65d5918140d3ab1d077eb5ed8f4655a8586c25 100644 (file)
@@ -440,8 +440,9 @@ struct address_space {
        unsigned long           nrexceptional;
        pgoff_t                 writeback_index;/* writeback starts here */
        const struct address_space_operations *a_ops;   /* methods */
-       unsigned long           flags;          /* error bits/gfp mask */
+       unsigned long           flags;          /* error bits */
        spinlock_t              private_lock;   /* for use by the address_space */
+       gfp_t                   gfp_mask;       /* implicit gfp mask for allocations */
        struct list_head        private_list;   /* ditto */
        void                    *private_data;  /* ditto */
 } __attribute__((aligned(sizeof(long))));
index 1f0be7213e6dfd6a03da165da1e32d93eab1e7df..24e2cc56beb108bb5364390d5b1f9a148982d295 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/irqdomain.h>
 #include <linux/lockdep.h>
 #include <linux/pinctrl/pinctrl.h>
-#include <linux/kconfig.h>
 
 struct gpio_desc;
 struct of_phandle_args;
index d7437777baaafe382458df9115b005f764a064f5..406c33dcae137a62af7e7d78edf11938270a45c9 100644 (file)
@@ -259,6 +259,12 @@ phys_addr_t paddr_vmcoreinfo_note(void);
        vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
 #define VMCOREINFO_CONFIG(name) \
        vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+#define VMCOREINFO_PAGE_OFFSET(value) \
+       vmcoreinfo_append_str("PAGE_OFFSET=%lx\n", (unsigned long)value)
+#define VMCOREINFO_VMALLOC_START(value) \
+       vmcoreinfo_append_str("VMALLOC_START=%lx\n", (unsigned long)value)
+#define VMCOREINFO_VMEMMAP_START(value) \
+       vmcoreinfo_append_str("VMEMMAP_START=%lx\n", (unsigned long)value)
 
 extern struct kimage *kexec_image;
 extern struct kimage *kexec_crash_image;
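
These are emitted from the architecture's vmcoreinfo hook so that makedumpfile can learn the layout instead of hardcoding it. Roughly what the x86 and MIPS patches in this merge wire up (a sketch; the real hooks export more fields):

void arch_crash_save_vmcoreinfo(void)
{
        VMCOREINFO_PAGE_OFFSET(PAGE_OFFSET);
        VMCOREINFO_VMALLOC_START(VMALLOC_START);
        VMCOREINFO_VMEMMAP_START(VMEMMAP_START);
}
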
index 4894c6888bc6cfb95e50d43366784769faf9b281..1c2a328296200ac391ed5385d2427ffa55d225a5 100644 (file)
@@ -38,6 +38,11 @@ extern void kmemleak_not_leak(const void *ptr) __ref;
 extern void kmemleak_ignore(const void *ptr) __ref;
 extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
+extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+                               gfp_t gfp) __ref;
+extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref;
+extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref;
+extern void kmemleak_ignore_phys(phys_addr_t phys) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
                                            int min_count, unsigned long flags,
@@ -106,6 +111,19 @@ static inline void kmemleak_erase(void **ptr)
 static inline void kmemleak_no_scan(const void *ptr)
 {
 }
+static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
+                                      int min_count, gfp_t gfp)
+{
+}
+static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+{
+}
+static inline void kmemleak_not_leak_phys(phys_addr_t phys)
+{
+}
+static inline void kmemleak_ignore_phys(phys_addr_t phys)
+{
+}
 
 #endif /* CONFIG_DEBUG_KMEMLEAK */
 
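
The *_phys variants let callers that track memory by physical address (memblock and other early allocators) talk to kmemleak without applying __va() to addresses that may lack a lowmem mapping. A sketch with a hypothetical allocator:

static phys_addr_t grab_early_range(size_t size)
{
        phys_addr_t base = early_phys_alloc(size);      /* hypothetical */

        /* min_count 0: scanned for pointers, never reported as a leak. */
        if (base)
                kmemleak_alloc_phys(base, size, 0, GFP_NOWAIT);
        return base;
}
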
index e691b6a23f72230bf50652b0ebf40a3fe5ef54c9..a6e82a69c363bb3d7af21e358aaef84bc2f265aa 100644 (file)
@@ -10,6 +10,17 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
                                           int node,
                                           const char namefmt[], ...);
 
+/**
+ * kthread_create - create a kthread on the current node
+ * @threadfn: the function to run in the thread
+ * @data: data pointer for @threadfn()
+ * @namefmt: printf-style format string for the thread name
+ * @...: arguments for @namefmt.
+ *
+ * This macro will create a kthread on the current node, leaving it in
+ * the stopped state.  This is just a helper for kthread_create_on_node();
+ * see the documentation there for more details.
+ */
 #define kthread_create(threadfn, data, namefmt, arg...) \
        kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
 
@@ -44,7 +55,7 @@ bool kthread_should_stop(void);
 bool kthread_should_park(void);
 bool kthread_freezable_should_stop(bool *was_frozen);
 void *kthread_data(struct task_struct *k);
-void *probe_kthread_data(struct task_struct *k);
+void *kthread_probe_data(struct task_struct *k);
 int kthread_park(struct task_struct *k);
 void kthread_unpark(struct task_struct *k);
 void kthread_parkme(void);
@@ -57,16 +68,23 @@ extern int tsk_fork_get_node(struct task_struct *tsk);
  * Simple work processor based on kthread.
  *
  * This provides an easier way to make use of kthreads.  A kthread_work
- * can be queued and flushed using queue/flush_kthread_work()
+ * can be queued and flushed using queue/kthread_flush_work()
  * respectively.  Queued kthread_works are processed by a kthread
  * running kthread_worker_fn().
  */
 struct kthread_work;
 typedef void (*kthread_work_func_t)(struct kthread_work *work);
+void kthread_delayed_work_timer_fn(unsigned long __data);
+
+enum {
+       KTW_FREEZABLE           = 1 << 0,       /* freeze during suspend */
+};
 
 struct kthread_worker {
+       unsigned int            flags;
        spinlock_t              lock;
        struct list_head        work_list;
+       struct list_head        delayed_work_list;
        struct task_struct      *task;
        struct kthread_work     *current_work;
 };
@@ -75,11 +93,19 @@ struct kthread_work {
        struct list_head        node;
        kthread_work_func_t     func;
        struct kthread_worker   *worker;
+       /* Number of canceling calls that are running at the moment. */
+       int                     canceling;
+};
+
+struct kthread_delayed_work {
+       struct kthread_work work;
+       struct timer_list timer;
 };
 
 #define KTHREAD_WORKER_INIT(worker)    {                               \
        .lock = __SPIN_LOCK_UNLOCKED((worker).lock),                    \
        .work_list = LIST_HEAD_INIT((worker).work_list),                \
+       .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
        }
 
 #define KTHREAD_WORK_INIT(work, fn)    {                               \
@@ -87,46 +113,88 @@ struct kthread_work {
        .func = (fn),                                                   \
        }
 
+#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {                         \
+       .work = KTHREAD_WORK_INIT((dwork).work, (fn)),                  \
+       .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,     \
+                                    0, (unsigned long)&(dwork),        \
+                                    TIMER_IRQSAFE),                    \
+       }
+
 #define DEFINE_KTHREAD_WORKER(worker)                                  \
        struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)
 
 #define DEFINE_KTHREAD_WORK(work, fn)                                  \
        struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
 
+#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn)                         \
+       struct kthread_delayed_work dwork =                             \
+               KTHREAD_DELAYED_WORK_INIT(dwork, fn)
+
 /*
  * kthread_worker.lock needs its own lockdep class key when defined on
  * stack with lockdep enabled.  Use the following macros in such cases.
  */
 #ifdef CONFIG_LOCKDEP
 # define KTHREAD_WORKER_INIT_ONSTACK(worker)                           \
-       ({ init_kthread_worker(&worker); worker; })
+       ({ kthread_init_worker(&worker); worker; })
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker)                         \
        struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
 #else
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
 #endif
 
-extern void __init_kthread_worker(struct kthread_worker *worker,
+extern void __kthread_init_worker(struct kthread_worker *worker,
                        const char *name, struct lock_class_key *key);
 
-#define init_kthread_worker(worker)                                    \
+#define kthread_init_worker(worker)                                    \
        do {                                                            \
                static struct lock_class_key __key;                     \
-               __init_kthread_worker((worker), "("#worker")->lock", &__key); \
+               __kthread_init_worker((worker), "("#worker")->lock", &__key); \
        } while (0)
 
-#define init_kthread_work(work, fn)                                    \
+#define kthread_init_work(work, fn)                                    \
        do {                                                            \
                memset((work), 0, sizeof(struct kthread_work));         \
                INIT_LIST_HEAD(&(work)->node);                          \
                (work)->func = (fn);                                    \
        } while (0)
 
+#define kthread_init_delayed_work(dwork, fn)                           \
+       do {                                                            \
+               kthread_init_work(&(dwork)->work, (fn));                \
+               __setup_timer(&(dwork)->timer,                          \
+                             kthread_delayed_work_timer_fn,            \
+                             (unsigned long)(dwork),                   \
+                             TIMER_IRQSAFE);                           \
+       } while (0)
+
 int kthread_worker_fn(void *worker_ptr);
 
-bool queue_kthread_work(struct kthread_worker *worker,
+__printf(2, 3)
+struct kthread_worker *
+kthread_create_worker(unsigned int flags, const char namefmt[], ...);
+
+struct kthread_worker *
+kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+                            const char namefmt[], ...);
+
+bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work);
-void flush_kthread_work(struct kthread_work *work);
-void flush_kthread_worker(struct kthread_worker *worker);
+
+bool kthread_queue_delayed_work(struct kthread_worker *worker,
+                               struct kthread_delayed_work *dwork,
+                               unsigned long delay);
+
+bool kthread_mod_delayed_work(struct kthread_worker *worker,
+                             struct kthread_delayed_work *dwork,
+                             unsigned long delay);
+
+void kthread_flush_work(struct kthread_work *work);
+void kthread_flush_worker(struct kthread_worker *worker);
+
+bool kthread_cancel_work_sync(struct kthread_work *work);
+bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
+
+void kthread_destroy_worker(struct kthread_worker *worker);
 
 #endif /* _LINUX_KTHREAD_H */
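
Putting the new pieces together, a minimal module-style sketch of the delayed-work API declared above (hypothetical work function and timeout):

static void my_work_fn(struct kthread_work *work)
{
        pr_info("delayed kthread work ran\n");
}

static struct kthread_worker *worker;
static struct kthread_delayed_work dwork;

static int my_start(void)
{
        worker = kthread_create_worker(KTW_FREEZABLE, "my_worker");
        if (IS_ERR(worker))
                return PTR_ERR(worker);

        kthread_init_delayed_work(&dwork, my_work_fn);
        kthread_queue_delayed_work(worker, &dwork, msecs_to_jiffies(100));
        return 0;
}

static void my_stop(void)
{
        kthread_cancel_delayed_work_sync(&dwork); /* safe vs. the timer */
        kthread_destroy_worker(worker);  /* flushes work, stops the task */
}
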
index 747f401cc312cbd0c1e8a749db99e311c6f4cb05..dd15d39e1985908614271dcb8e8712c4fffa4f63 100644 (file)
 #include <linux/hugetlb_inline.h>
 
 /*
- * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
- * allocation mode flags.
+ * Bits in mapping->flags.
  */
 enum mapping_flags {
-       AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
-       AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
-       AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
-       AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
-       AS_EXITING      = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
+       AS_EIO          = 0,    /* IO error on async write */
+       AS_ENOSPC       = 1,    /* ENOSPC on async write */
+       AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
+       AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
+       AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
-       AS_NO_WRITEBACK_TAGS = __GFP_BITS_SHIFT + 5,
+       AS_NO_WRITEBACK_TAGS = 5,
 };
 
 static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -78,7 +77,7 @@ static inline int mapping_use_writeback_tags(struct address_space *mapping)
 
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
-       return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
+       return mapping->gfp_mask;
 }
 
 /* Restricts the given gfp_mask to what the mapping allows. */
@@ -94,8 +93,7 @@ static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
  */
 static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 {
-       m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
-                               (__force unsigned long)mask;
+       m->gfp_mask = mask;
 }
 
 void release_pages(struct page **pages, int nr, bool cold);
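
Callers keep the same contract after the split; only the storage moved. Typical filesystem usage (sketch):

static void setup_mapping(struct inode *inode)
{
        /* Allocations for this mapping must never recurse into the fs. */
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
}

static gfp_t mapping_alloc_mask(struct address_space *mapping)
{
        /* Narrow the caller's mask to what the mapping allows;
         * with the setup above this yields GFP_NOFS. */
        return mapping_gfp_constraint(mapping, GFP_KERNEL);
}
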
index 52b97db938309429e7262f79af6eaef58fd672d2..af3581b8a4518119d484b367869035537607070b 100644 (file)
@@ -461,6 +461,14 @@ static inline struct radix_tree_node *entry_to_node(void *ptr)
  *
  * This function updates @iter->index in the case of a successful lookup.
  * For tagged lookup it also eats @iter->tags.
+ *
+ * There are several cases where 'slot' can be passed in as NULL to this
+ * function.  These cases result from the use of radix_tree_iter_next() or
+ * radix_tree_iter_retry().  In these cases we don't end up dereferencing
+ * 'slot' because either:
+ * a) we are doing tagged iteration and iter->tags has been set to 0, or
+ * b) we are doing non-tagged iteration, and iter->index and iter->next_index
+ *    have been set up so that radix_tree_chunk_size() returns 1 or 0.
  */
 static __always_inline void **
 radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
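
The NULL slot arises in the standard lookup loop; this is the pattern the comment describes (a sketch, assuming an initialized tree and the RCU read lock held):

static void scan_tree(struct radix_tree_root *root)
{
        void **slot;
        struct radix_tree_iter iter;

        radix_tree_for_each_slot(slot, root, &iter, 0) {
                void *entry = radix_tree_deref_slot(slot);

                if (radix_tree_deref_retry(entry)) {
                        /* Returns NULL and rewinds the chunk; the next
                         * radix_tree_next_slot() copes with slot == NULL. */
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }
                /* ... use entry ... */
        }
}
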
index 3d6e9815cd854d6d093e8082df312b63790b3d11..f7bb7a355cf71381100730a2379ba206bc3258ea 100644 (file)
@@ -34,7 +34,7 @@ extern const struct file_operations random_fops, urandom_fops;
 
 unsigned int get_random_int(void);
 unsigned long get_random_long(void);
-unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+unsigned long randomize_page(unsigned long start, unsigned long range);
 
 u32 prandom_u32(void);
 void prandom_bytes(void *buf, size_t nbytes);
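
randomize_page() takes a base and a range and returns a page-aligned address inside [start, start + range), which is what nearly every randomize_range() caller actually wanted. A caller of the shape this series converts (sketch; the exact window size varies by arch):

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        /* Place the heap base somewhere in a 32MB window above brk. */
        return randomize_page(mm->brk, SZ_32M);
}
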
index ecbb34a382b898cabf40d04237fbfae5534c5bc0..68c1448e56bb3840da7161bebb1de12b8954eb39 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/timer.h>
 #include <linux/wait.h>
 #include <linux/list.h>
+#include <linux/irq_work.h>
 #include <linux/bug.h>
 #include <linux/fs.h>
 #include <linux/poll.h>
@@ -38,7 +39,7 @@ struct rchan_buf
        size_t subbufs_consumed;        /* count of sub-buffers consumed */
        struct rchan *chan;             /* associated channel */
        wait_queue_head_t read_wait;    /* reader wait queue */
-       struct timer_list timer;        /* reader wake-up timer */
+       struct irq_work wakeup_work;    /* reader wakeup */
        struct dentry *dentry;          /* channel file dentry */
        struct kref kref;               /* channel buffer refcount */
        struct page **page_array;       /* array of current buffer pages */
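
The timer goes away because relay may need to poke readers from contexts where mod_timer() is not safe; irq_work defers the actual wakeup to a safe context. Roughly how the corresponding kernel/relay.c change in this series wires it up (sketch):

static void wakeup_readers(struct irq_work *work)
{
        struct rchan_buf *buf =
                container_of(work, struct rchan_buf, wakeup_work);

        wake_up_interruptible(&buf->read_wait);
}

static void relay_buf_init_wakeup(struct rchan_buf *buf)
{
        init_irq_work(&buf->wakeup_work, wakeup_readers);
}

static void relay_buf_switched(struct rchan_buf *buf)
{
        /* Deferred wakeup; callable from any context. */
        irq_work_queue(&buf->wakeup_work);
}
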
index 976ce3a19f1b23646c4494029929e538f5e0204b..d0efd6e6c20a6a6a39273639dbd33f3d77c2e156 100644 (file)
@@ -21,6 +21,7 @@ struct sem_array {
        struct list_head        list_id;        /* undo requests on this array */
        int                     sem_nsems;      /* no. of semaphores in array */
        int                     complex_count;  /* pending complex operations */
+       bool                    complex_mode;   /* no parallel simple ops */
 };
 
 #ifdef CONFIG_SYSVIPC
diff --git a/include/uapi/linux/auto_dev-ioctl.h b/include/uapi/linux/auto_dev-ioctl.h
new file mode 100644 (file)
index 0000000..021ed33
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2008 Red Hat, Inc. All rights reserved.
+ * Copyright 2008 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ */
+
+#ifndef _UAPI_LINUX_AUTO_DEV_IOCTL_H
+#define _UAPI_LINUX_AUTO_DEV_IOCTL_H
+
+#include <linux/auto_fs.h>
+#include <linux/string.h>
+
+#define AUTOFS_DEVICE_NAME             "autofs"
+
+#define AUTOFS_DEV_IOCTL_VERSION_MAJOR 1
+#define AUTOFS_DEV_IOCTL_VERSION_MINOR 0
+
+#define AUTOFS_DEV_IOCTL_SIZE          sizeof(struct autofs_dev_ioctl)
+
+/*
+ * An ioctl interface for autofs mount point control.
+ */
+
+struct args_protover {
+       __u32   version;
+};
+
+struct args_protosubver {
+       __u32   sub_version;
+};
+
+struct args_openmount {
+       __u32   devid;
+};
+
+struct args_ready {
+       __u32   token;
+};
+
+struct args_fail {
+       __u32   token;
+       __s32   status;
+};
+
+struct args_setpipefd {
+       __s32   pipefd;
+};
+
+struct args_timeout {
+       __u64   timeout;
+};
+
+struct args_requester {
+       __u32   uid;
+       __u32   gid;
+};
+
+struct args_expire {
+       __u32   how;
+};
+
+struct args_askumount {
+       __u32   may_umount;
+};
+
+struct args_ismountpoint {
+       union {
+               struct args_in {
+                       __u32   type;
+               } in;
+               struct args_out {
+                       __u32   devid;
+                       __u32   magic;
+               } out;
+       };
+};
+
+/*
+ * All the ioctls use this structure.
+ * When sending a path, size must account for the total length
+ * of the chunk of memory; otherwise it is the size of the
+ * structure.
+ */
+
+struct autofs_dev_ioctl {
+       __u32 ver_major;
+       __u32 ver_minor;
+       __u32 size;             /* total size of data passed in
+                                * including this struct */
+       __s32 ioctlfd;          /* automount command fd */
+
+       /* Command parameters */
+
+       union {
+               struct args_protover            protover;
+               struct args_protosubver         protosubver;
+               struct args_openmount           openmount;
+               struct args_ready               ready;
+               struct args_fail                fail;
+               struct args_setpipefd           setpipefd;
+               struct args_timeout             timeout;
+               struct args_requester           requester;
+               struct args_expire              expire;
+               struct args_askumount           askumount;
+               struct args_ismountpoint        ismountpoint;
+       };
+
+       char path[0];
+};
+
+static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
+{
+       memset(in, 0, sizeof(struct autofs_dev_ioctl));
+       in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
+       in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
+       in->size = sizeof(struct autofs_dev_ioctl);
+       in->ioctlfd = -1;
+}
+
+/*
+ * If you change this make sure you make the corresponding change
+ * to autofs-dev-ioctl.c:lookup_ioctl()
+ */
+enum {
+       /* Get various version info */
+       AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
+       AUTOFS_DEV_IOCTL_PROTOVER_CMD,
+       AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
+
+       /* Open mount ioctl fd */
+       AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
+
+       /* Close mount ioctl fd */
+       AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
+
+       /* Mount/expire status returns */
+       AUTOFS_DEV_IOCTL_READY_CMD,
+       AUTOFS_DEV_IOCTL_FAIL_CMD,
+
+       /* Activate/deactivate autofs mount */
+       AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
+       AUTOFS_DEV_IOCTL_CATATONIC_CMD,
+
+       /* Expiry timeout */
+       AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
+
+       /* Get mount last requesting uid and gid */
+       AUTOFS_DEV_IOCTL_REQUESTER_CMD,
+
+       /* Check for eligible expire candidates */
+       AUTOFS_DEV_IOCTL_EXPIRE_CMD,
+
+       /* Request busy status */
+       AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
+
+       /* Check if path is a mountpoint */
+       AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
+};
+
+#define AUTOFS_IOCTL 0x93
+
+#define AUTOFS_DEV_IOCTL_VERSION \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOVER \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_OPENMOUNT \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_READY \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_FAIL \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_SETPIPEFD \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CATATONIC \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_TIMEOUT \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_REQUESTER \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_EXPIRE \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
+       _IOWR(AUTOFS_IOCTL, \
+             AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
+
+#endif /* _UAPI_LINUX_AUTO_DEV_IOCTL_H */
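
From user space the header is now usable directly; a hypothetical probe of the control device (error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/auto_dev-ioctl.h>

int main(void)
{
        struct autofs_dev_ioctl param;
        int fd = open("/dev/autofs", O_RDONLY);

        if (fd < 0)
                return 1;

        init_autofs_dev_ioctl(&param);
        if (ioctl(fd, AUTOFS_DEV_IOCTL_VERSION, &param) == 0)
                printf("autofs device ioctl version %u.%u\n",
                       param.ver_major, param.ver_minor);
        return 0;
}
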
index 9175a1b4dc69a41301f0576939eeb78e75e02887..1bfc3ed8b2841d7a5c099f1a2bd3310720720a6a 100644 (file)
@@ -12,6 +12,7 @@
 #define _UAPI_LINUX_AUTO_FS_H
 
 #include <linux/types.h>
+#include <linux/limits.h>
 #ifndef __KERNEL__
 #include <sys/ioctl.h>
 #endif /* __KERNEL__ */
index d7fc226396652c6e61dc53ec9743a18dccbdf6c2..34407f15e6d34da57be238f69441f1dad9e60764 100644 (file)
@@ -1288,6 +1288,7 @@ config SYSFS_DEPRECATED_V2
 
 config RELAY
        bool "Kernel->user space relay support (formerly relayfs)"
+       select IRQ_WORK
        help
          This option enables support for relay interface support in
          certain file systems (such as debugfs).
index c6521c205cb403a81cc2bf4e6969b2a5d54620c3..e12307d0c920ccd9e50168ed890c5b6057206a52 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -51,19 +51,14 @@ struct msg_receiver {
        long                    r_msgtype;
        long                    r_maxsize;
 
-       /*
-        * Mark r_msg volatile so that the compiler
-        * does not try to get smart and optimize
-        * it. We rely on this for the lockless
-        * receive algorithm.
-        */
-       struct msg_msg          *volatile r_msg;
+       struct msg_msg          *r_msg;
 };
 
 /* one msg_sender for each sleeping sender */
 struct msg_sender {
        struct list_head        list;
        struct task_struct      *tsk;
+       size_t                  msgsz;
 };
 
 #define SEARCH_ANY             1
@@ -159,45 +154,72 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
        return msq->q_perm.id;
 }
 
-static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
+static inline bool msg_fits_inqueue(struct msg_queue *msq, size_t msgsz)
+{
+       return msgsz + msq->q_cbytes <= msq->q_qbytes &&
+               1 + msq->q_qnum <= msq->q_qbytes;
+}
+
+static inline void ss_add(struct msg_queue *msq,
+                         struct msg_sender *mss, size_t msgsz)
 {
        mss->tsk = current;
+       mss->msgsz = msgsz;
        __set_current_state(TASK_INTERRUPTIBLE);
        list_add_tail(&mss->list, &msq->q_senders);
 }
 
 static inline void ss_del(struct msg_sender *mss)
 {
-       if (mss->list.next != NULL)
+       if (mss->list.next)
                list_del(&mss->list);
 }
 
-static void ss_wakeup(struct list_head *h, int kill)
+static void ss_wakeup(struct msg_queue *msq,
+                     struct wake_q_head *wake_q, bool kill)
 {
        struct msg_sender *mss, *t;
+       struct task_struct *stop_tsk = NULL;
+       struct list_head *h = &msq->q_senders;
 
        list_for_each_entry_safe(mss, t, h, list) {
                if (kill)
                        mss->list.next = NULL;
-               wake_up_process(mss->tsk);
+
+               /*
+                * Stop at the first task we don't wake up;
+                * we've already iterated the original
+                * sender queue.
+                */
+               else if (stop_tsk == mss->tsk)
+                       break;
+               /*
+                * We are not in an EIDRM scenario here, therefore
+                * verify that we really need to wakeup the task.
+                * To maintain current semantics and wakeup order,
+                * move the sender to the tail on behalf of the
+                * blocked task.
+                */
+               else if (!msg_fits_inqueue(msq, mss->msgsz)) {
+                       if (!stop_tsk)
+                               stop_tsk = mss->tsk;
+
+                       list_move_tail(&mss->list, &msq->q_senders);
+                       continue;
+               }
+
+               wake_q_add(wake_q, mss->tsk);
        }
 }
 
-static void expunge_all(struct msg_queue *msq, int res)
+static void expunge_all(struct msg_queue *msq, int res,
+                       struct wake_q_head *wake_q)
 {
        struct msg_receiver *msr, *t;
 
        list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
-               msr->r_msg = NULL; /* initialize expunge ordering */
-               wake_up_process(msr->r_tsk);
-               /*
-                * Ensure that the wakeup is visible before setting r_msg as
-                * the receiving end depends on it: either spinning on a nil,
-                * or dealing with -EAGAIN cases. See lockless receive part 1
-                * and 2 in do_msgrcv().
-                */
-               smp_wmb(); /* barrier (B) */
-               msr->r_msg = ERR_PTR(res);
+               wake_q_add(wake_q, msr->r_tsk);
+               WRITE_ONCE(msr->r_msg, ERR_PTR(res));
        }
 }
 
@@ -213,11 +235,13 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
        struct msg_msg *msg, *t;
        struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
+       WAKE_Q(wake_q);
 
-       expunge_all(msq, -EIDRM);
-       ss_wakeup(&msq->q_senders, 1);
+       expunge_all(msq, -EIDRM, &wake_q);
+       ss_wakeup(msq, &wake_q, true);
        msg_rmid(ns, msq);
        ipc_unlock_object(&msq->q_perm);
+       wake_up_q(&wake_q);
        rcu_read_unlock();
 
        list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
@@ -372,6 +396,9 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
                freeque(ns, ipcp);
                goto out_up;
        case IPC_SET:
+       {
+               WAKE_Q(wake_q);
+
                if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
                    !capable(CAP_SYS_RESOURCE)) {
                        err = -EPERM;
@@ -386,15 +413,21 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
                msq->q_qbytes = msqid64.msg_qbytes;
 
                msq->q_ctime = get_seconds();
-               /* sleeping receivers might be excluded by
+               /*
+                * Sleeping receivers might be excluded by
                 * stricter permissions.
                 */
-               expunge_all(msq, -EAGAIN);
-               /* sleeping senders might be able to send
+               expunge_all(msq, -EAGAIN, &wake_q);
+               /*
+                * Sleeping senders might be able to send
                 * due to a larger queue size.
                 */
-               ss_wakeup(&msq->q_senders, 0);
-               break;
+               ss_wakeup(msq, &wake_q, false);
+               ipc_unlock_object(&msq->q_perm);
+               wake_up_q(&wake_q);
+
+               goto out_unlock1;
+       }
        default:
                err = -EINVAL;
                goto out_unlock1;
@@ -566,7 +599,8 @@ static int testmsg(struct msg_msg *msg, long type, int mode)
        return 0;
 }
 
-static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
+static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
+                                struct wake_q_head *wake_q)
 {
        struct msg_receiver *msr, *t;
 
@@ -577,27 +611,14 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
 
                        list_del(&msr->r_list);
                        if (msr->r_maxsize < msg->m_ts) {
-                               /* initialize pipelined send ordering */
-                               msr->r_msg = NULL;
-                               wake_up_process(msr->r_tsk);
-                               /* barrier (B) see barrier comment below */
-                               smp_wmb();
-                               msr->r_msg = ERR_PTR(-E2BIG);
+                               wake_q_add(wake_q, msr->r_tsk);
+                               WRITE_ONCE(msr->r_msg, ERR_PTR(-E2BIG));
                        } else {
-                               msr->r_msg = NULL;
                                msq->q_lrpid = task_pid_vnr(msr->r_tsk);
                                msq->q_rtime = get_seconds();
-                               wake_up_process(msr->r_tsk);
-                               /*
-                                * Ensure that the wakeup is visible before
-                                * setting r_msg, as the receiving can otherwise
-                                * exit - once r_msg is set, the receiver can
-                                * continue. See lockless receive part 1 and 2
-                                * in do_msgrcv(). Barrier (B).
-                                */
-                               smp_wmb();
-                               msr->r_msg = msg;
 
+                               wake_q_add(wake_q, msr->r_tsk);
+                               WRITE_ONCE(msr->r_msg, msg);
                                return 1;
                        }
                }
@@ -613,6 +634,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
        struct msg_msg *msg;
        int err;
        struct ipc_namespace *ns;
+       WAKE_Q(wake_q);
 
        ns = current->nsproxy->ipc_ns;
 
@@ -654,10 +676,8 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                if (err)
                        goto out_unlock0;
 
-               if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
-                               1 + msq->q_qnum <= msq->q_qbytes) {
+               if (msg_fits_inqueue(msq, msgsz))
                        break;
-               }
 
                /* queue full, wait: */
                if (msgflg & IPC_NOWAIT) {
@@ -666,7 +686,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                }
 
                /* enqueue the sender and prepare to block */
-               ss_add(msq, &s);
+               ss_add(msq, &s, msgsz);
 
                if (!ipc_rcu_getref(msq)) {
                        err = -EIDRM;
@@ -686,7 +706,6 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                        err = -EIDRM;
                        goto out_unlock0;
                }
-
                ss_del(&s);
 
                if (signal_pending(current)) {
@@ -695,10 +714,11 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
                }
 
        }
+
        msq->q_lspid = task_tgid_vnr(current);
        msq->q_stime = get_seconds();
 
-       if (!pipelined_send(msq, msg)) {
+       if (!pipelined_send(msq, msg, &wake_q)) {
                /* no one is waiting for this message, enqueue it */
                list_add_tail(&msg->m_list, &msq->q_messages);
                msq->q_cbytes += msgsz;
@@ -712,6 +732,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 
 out_unlock0:
        ipc_unlock_object(&msq->q_perm);
+       wake_up_q(&wake_q);
 out_unlock1:
        rcu_read_unlock();
        if (msg != NULL)
@@ -829,6 +850,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
        struct msg_queue *msq;
        struct ipc_namespace *ns;
        struct msg_msg *msg, *copy = NULL;
+       WAKE_Q(wake_q);
 
        ns = current->nsproxy->ipc_ns;
 
@@ -893,7 +915,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
                        msq->q_cbytes -= msg->m_ts;
                        atomic_sub(msg->m_ts, &ns->msg_bytes);
                        atomic_dec(&ns->msg_hdrs);
-                       ss_wakeup(&msq->q_senders, 0);
+                       ss_wakeup(msq, &wake_q, false);
 
                        goto out_unlock0;
                }
@@ -919,71 +941,38 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
                rcu_read_unlock();
                schedule();
 
-               /* Lockless receive, part 1:
-                * Disable preemption.  We don't hold a reference to the queue
-                * and getting a reference would defeat the idea of a lockless
-                * operation, thus the code relies on rcu to guarantee the
-                * existence of msq:
+               /*
+                * Lockless receive, part 1:
+                * We don't hold a reference to the queue and getting a
+                * reference would defeat the idea of a lockless operation,
+                * thus the code relies on rcu to guarantee the existence of
+                * msq:
                 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
                 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
-                * rcu_read_lock() prevents preemption between reading r_msg
-                * and acquiring the q_perm.lock in ipc_lock_object().
                 */
                rcu_read_lock();
 
-               /* Lockless receive, part 2:
-                * Wait until pipelined_send or expunge_all are outside of
-                * wake_up_process(). There is a race with exit(), see
-                * ipc/mqueue.c for the details. The correct serialization
-                * ensures that a receiver cannot continue without the wakeup
-                * being visibible _before_ setting r_msg:
-                *
-                * CPU 0                             CPU 1
-                * <loop receiver>
-                *   smp_rmb(); (A) <-- pair -.      <waker thread>
-                *   <load ->r_msg>           |        msr->r_msg = NULL;
-                *                            |        wake_up_process();
-                * <continue>                 `------> smp_wmb(); (B)
-                *                                     msr->r_msg = msg;
+               /*
+                * Lockless receive, part 2:
+                * The work in pipelined_send() and expunge_all():
+                * - Set pointer to message
+                * - Queue the receiver task for later wakeup
+                * - Wake up the process after the lock is dropped.
                 *
-                * Where (A) orders the message value read and where (B) orders
-                * the write to the r_msg -- done in both pipelined_send and
-                * expunge_all.
-                */
-               for (;;) {
-                       /*
-                        * Pairs with writer barrier in pipelined_send
-                        * or expunge_all.
-                        */
-                       smp_rmb(); /* barrier (A) */
-                       msg = (struct msg_msg *)msr_d.r_msg;
-                       if (msg)
-                               break;
-
-                       /*
-                        * The cpu_relax() call is a compiler barrier
-                        * which forces everything in this loop to be
-                        * re-loaded.
-                        */
-                       cpu_relax();
-               }
-
-               /* Lockless receive, part 3:
-                * If there is a message or an error then accept it without
-                * locking.
+                * Should the process wake up before this wakeup (due to a
+                * signal) it will either see the message and continue ...
                 */
+               msg = READ_ONCE(msr_d.r_msg);
                if (msg != ERR_PTR(-EAGAIN))
                        goto out_unlock1;
 
-               /* Lockless receive, part 3:
-                * Acquire the queue spinlock.
-                */
+                /*
+                 * ... or see -EAGAIN, acquire the lock to check the message
+                 * again.
+                 */
                ipc_lock_object(&msq->q_perm);
 
-               /* Lockless receive, part 4:
-                * Repeat test after acquiring the spinlock.
-                */
-               msg = (struct msg_msg *)msr_d.r_msg;
+               msg = msr_d.r_msg;
                if (msg != ERR_PTR(-EAGAIN))
                        goto out_unlock0;
 
@@ -998,6 +987,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
 
 out_unlock0:
        ipc_unlock_object(&msq->q_perm);
+       wake_up_q(&wake_q);
 out_unlock1:
        rcu_read_unlock();
        if (IS_ERR(msg)) {
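
Every conversion above follows the same wake_q discipline: record the tasks to wake while the ipc object lock is held, then issue the wakeups after dropping it, so a woken task never immediately blocks on the lock its waker still holds. A distilled sketch (hypothetical object and fields):

struct my_obj {
        spinlock_t lock;
        void *result;
};

static void hand_off(struct my_obj *o, struct task_struct *tsk, void *msg)
{
        WAKE_Q(wake_q);                 /* on-stack list of tasks to wake */

        spin_lock(&o->lock);
        WRITE_ONCE(o->result, msg);     /* pairs with READ_ONCE() in the waiter */
        wake_q_add(&wake_q, tsk);       /* record only, no wakeup yet */
        spin_unlock(&o->lock);

        wake_up_q(&wake_q);     /* the real wake_up_process() happens here */
}
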
index 7c9d4f7683c073de736a0723a0c0d7a1e3178ad2..10b94bc59d4a5ffaa85a89152ac02a1f806db054 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -162,14 +162,21 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 
 /*
  * Locking:
+ * a) global sem_lock() for read/write
  *     sem_undo.id_next,
  *     sem_array.complex_count,
- *     sem_array.pending{_alter,_cont},
- *     sem_array.sem_undo: global sem_lock() for read/write
- *     sem_undo.proc_next: only "current" is allowed to read/write that field.
+ *     sem_array.complex_mode
+ *     sem_array.pending{_alter,_const},
+ *     sem_array.sem_undo
  *
+ * b) global or semaphore sem_lock() for read/write:
  *     sem_array.sem_base[i].pending_{const,alter}:
- *             global or semaphore sem_lock() for read/write
+ *     sem_array.complex_mode (for read)
+ *
+ * c) special:
+ *     sem_undo_list.list_proc:
+ *     * undo_list->lock for write
+ *     * rcu for read
  */
 
 #define sc_semmsl      sem_ctls[0]
@@ -260,30 +267,61 @@ static void sem_rcu_free(struct rcu_head *head)
 }
 
 /*
- * Wait until all currently ongoing simple ops have completed.
+ * Enter the mode suitable for non-simple operations:
  * Caller must own sem_perm.lock.
- * New simple ops cannot start, because simple ops first check
- * that sem_perm.lock is free.
- * that a) sem_perm.lock is free and b) complex_count is 0.
  */
-static void sem_wait_array(struct sem_array *sma)
+static void complexmode_enter(struct sem_array *sma)
 {
        int i;
        struct sem *sem;
 
-       if (sma->complex_count)  {
-               /* The thread that increased sma->complex_count waited on
-                * all sem->lock locks. Thus we don't need to wait again.
-                */
+       if (sma->complex_mode)  {
+               /* We are already in complex_mode. Nothing to do */
                return;
        }
 
+       /* We need a full barrier after setting complex_mode:
+        * The write to complex_mode must be visible
+        * before we read the first sem->lock spinlock state.
+        */
+       smp_store_mb(sma->complex_mode, true);
+
        for (i = 0; i < sma->sem_nsems; i++) {
                sem = sma->sem_base + i;
                spin_unlock_wait(&sem->lock);
        }
+       /*
+        * spin_unlock_wait() is not a memory barrier, it is only a
+        * control barrier. The code must pair with spin_unlock(&sem->lock),
+        * thus just the control barrier is insufficient.
+        *
+        * smp_rmb() is sufficient, as writes cannot pass the control barrier.
+        */
+       smp_rmb();
 }
 
+/*
+ * Try to leave the mode that disallows simple operations:
+ * Caller must own sem_perm.lock.
+ */
+static void complexmode_tryleave(struct sem_array *sma)
+{
+       if (sma->complex_count)  {
+               /* Complex ops are sleeping.
+                * We must stay in complex mode
+                */
+               return;
+       }
+       /*
+        * Immediately after setting complex_mode to false,
+        * a simple op can start. Thus: all memory writes
+        * performed by the current operation must be visible
+        * before we set complex_mode to false.
+        */
+       smp_store_release(&sma->complex_mode, false);
+}
+
+#define SEM_GLOBAL_LOCK        (-1)
 /*
  * If the request contains only one semaphore operation, and there are
  * no complex transactions pending, lock only the semaphore involved.
@@ -300,56 +338,42 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                /* Complex operation - acquire a full lock */
                ipc_lock_object(&sma->sem_perm);
 
-               /* And wait until all simple ops that are processed
-                * right now have dropped their locks.
-                */
-               sem_wait_array(sma);
-               return -1;
+               /* Prevent parallel simple ops */
+               complexmode_enter(sma);
+               return SEM_GLOBAL_LOCK;
        }
 
        /*
         * Only one semaphore affected - try to optimize locking.
-        * The rules are:
-        * - optimized locking is possible if no complex operation
-        *   is either enqueued or processed right now.
-        * - The test for enqueued complex ops is simple:
-        *      sma->complex_count != 0
-        * - Testing for complex ops that are processed right now is
-        *   a bit more difficult. Complex ops acquire the full lock
-        *   and first wait that the running simple ops have completed.
-        *   (see above)
-        *   Thus: If we own a simple lock and the global lock is free
-        *      and complex_count is now 0, then it will stay 0 and
-        *      thus just locking sem->lock is sufficient.
+        * Optimized locking is possible if no complex operation
+        * is either enqueued or processed right now.
+        *
+        * Both facts are tracked by complex_mode.
         */
        sem = sma->sem_base + sops->sem_num;
 
-       if (sma->complex_count == 0) {
+       /*
+        * Initial check for complex_mode. Just an optimization,
+        * no locking, no memory barrier.
+        */
+       if (!sma->complex_mode) {
                /*
                 * It appears that no complex operation is around.
                 * Acquire the per-semaphore lock.
                 */
                spin_lock(&sem->lock);
 
-               /* Then check that the global lock is free */
-               if (!spin_is_locked(&sma->sem_perm.lock)) {
-                       /*
-                        * We need a memory barrier with acquire semantics,
-                        * otherwise we can race with another thread that does:
-                        *      complex_count++;
-                        *      spin_unlock(sem_perm.lock);
-                        */
-                       smp_acquire__after_ctrl_dep();
+               /*
+                * See 51d7d5205d33
+                * ("powerpc: Add smp_mb() to arch_spin_is_locked()"):
+                * A full barrier is required: the write of sem->lock
+                * must be visible before the read is executed
+                */
+               smp_mb();
 
-                       /*
-                        * Now repeat the test of complex_count:
-                        * It can't change anymore until we drop sem->lock.
-                        * Thus: if is now 0, then it will stay 0.
-                        */
-                       if (sma->complex_count == 0) {
-                               /* fast path successful! */
-                               return sops->sem_num;
-                       }
+               if (!smp_load_acquire(&sma->complex_mode)) {
+                       /* fast path successful! */
+                       return sops->sem_num;
                }
                spin_unlock(&sem->lock);
        }
@@ -369,15 +393,16 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                /* Not a false alarm, thus complete the sequence for a
                 * full lock.
                 */
-               sem_wait_array(sma);
-               return -1;
+               complexmode_enter(sma);
+               return SEM_GLOBAL_LOCK;
        }
 }
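
A minimal sketch (illustrative only, not part of the patch) of how the new
complex-mode helpers pair up around a multi-sop operation, mirroring the
sem_lock()/sem_unlock() pairing above and below:

    static void do_complex_op(struct sem_array *sma)
    {
            ipc_lock_object(&sma->sem_perm);    /* take the global lock */
            complexmode_enter(sma);             /* forbid parallel simple ops */

            /* ... operate on several semaphores atomically ... */

            complexmode_tryleave(sma);          /* drop complex_mode when safe */
            ipc_unlock_object(&sma->sem_perm);
    }
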
 
 static inline void sem_unlock(struct sem_array *sma, int locknum)
 {
-       if (locknum == -1) {
+       if (locknum == SEM_GLOBAL_LOCK) {
                unmerge_queues(sma);
+               complexmode_tryleave(sma);
                ipc_unlock_object(&sma->sem_perm);
        } else {
                struct sem *sem = sma->sem_base + locknum;
@@ -529,6 +554,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        }
 
        sma->complex_count = 0;
+       sma->complex_mode = true; /* dropped by sem_unlock below */
        INIT_LIST_HEAD(&sma->pending_alter);
        INIT_LIST_HEAD(&sma->pending_const);
        INIT_LIST_HEAD(&sma->list_id);
@@ -2079,6 +2105,8 @@ void exit_sem(struct task_struct *tsk)
                struct list_head tasks;
                int semid, i;
 
+               cond_resched();
+
                rcu_read_lock();
                un = list_entry_rcu(ulp->list_proc.next,
                                    struct sem_undo, list_proc);
@@ -2184,10 +2212,10 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
        /*
         * The proc interface isn't aware of sem_lock(), it calls
         * ipc_lock_object() directly (in sysvipc_find_ipc).
-        * In order to stay compatible with sem_lock(), we must wait until
-        * all simple semop() calls have left their critical regions.
+        * In order to stay compatible with sem_lock(), we must
+        * enter / leave complex_mode.
         */
-       sem_wait_array(sma);
+       complexmode_enter(sma);
 
        sem_otime = get_semotime(sma);
 
@@ -2204,6 +2232,8 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
                   sem_otime,
                   sma->sem_ctime);
 
+       complexmode_tryleave(sma);
+
        return 0;
 }
 #endif
index 9f748ed7bea8eb83ca37b38289cdb418c6841672..1a8f34f6360112bab3cc122b8027ec479768a7af 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ARMV8_DEPRECATED=y
 CONFIG_ASHMEM=y
 CONFIG_AUDIT=y
-CONFIG_BLK_DEV_DM=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CGROUPS=y
 CONFIG_CGROUP_CPUACCT=y
@@ -19,9 +18,7 @@ CONFIG_CGROUP_DEBUG=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_SCHED=y
 CONFIG_CP15_BARRIER_EMULATION=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
+CONFIG_DEFAULT_SECURITY_SELINUX=y
 CONFIG_EMBEDDED=y
 CONFIG_FB=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -41,7 +38,6 @@ CONFIG_IPV6=y
 CONFIG_IPV6_MIP6=y
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -135,6 +131,7 @@ CONFIG_PREEMPT=y
 CONFIG_QUOTA=y
 CONFIG_RTC_CLASS=y
 CONFIG_RT_GROUP_SCHED=y
+CONFIG_SECCOMP=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_SELINUX=y
index e3b953e966d26a1cdc47778158b0ab76a0f52ecf..297756be369c68d9e56c3c5823de5530118671f6 100644 (file)
@@ -6,12 +6,16 @@
 # CONFIG_PM_WAKELOCKS_GC is not set
 # CONFIG_VT is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BLK_DEV_DM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_COMPACTION=y
 CONFIG_DEBUG_RODATA=y
+CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
 CONFIG_DRAGONRISE_FF=y
 CONFIG_ENABLE_DEFAULT_TRACERS=y
 CONFIG_EXT4_FS=y
index 432c3d71d19536b0f468ec84ecc15d2300752103..2b59c82cc3e1bb0813088cf5e22307d81230e13e 100644 (file)
@@ -98,26 +98,26 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
        trace_sched_process_hang(t);
 
-       if (!sysctl_hung_task_warnings)
+       if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
                return;
 
-       if (sysctl_hung_task_warnings > 0)
-               sysctl_hung_task_warnings--;
-
        /*
         * Ok, the task did not get scheduled for more than 2 minutes,
         * complain:
         */
-       pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
-               t->comm, t->pid, timeout);
-       pr_err("      %s %s %.*s\n",
-               print_tainted(), init_utsname()->release,
-               (int)strcspn(init_utsname()->version, " "),
-               init_utsname()->version);
-       pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
-               " disables this message.\n");
-       sched_show_task(t);
-       debug_show_all_locks();
+       if (sysctl_hung_task_warnings) {
+               sysctl_hung_task_warnings--;
+               pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
+                       t->comm, t->pid, timeout);
+               pr_err("      %s %s %.*s\n",
+                       print_tainted(), init_utsname()->release,
+                       (int)strcspn(init_utsname()->version, " "),
+                       init_utsname()->version);
+               pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+                       " disables this message.\n");
+               sched_show_task(t);
+               debug_show_all_locks();
+       }
 
        touch_nmi_watchdog();
 
index d10ab6b9b5e082e47570ab76b8e5aefa7e58ec5a..d63095472ea98dfbe9932246032597f630886a0f 100644 (file)
@@ -49,7 +49,7 @@
 #include <linux/cpu.h>
 #include <linux/jump_label.h>
 
-#include <asm-generic/sections.h>
+#include <asm/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
 #include <asm/uaccess.h>
index 4ab4c3766a80ddf917cb988ac9c55d48957edbc4..be2cc1f9dd571b8407044de18710c8695e8af220 100644 (file)
@@ -138,7 +138,7 @@ void *kthread_data(struct task_struct *task)
 }
 
 /**
- * probe_kthread_data - speculative version of kthread_data()
+ * kthread_probe_data - speculative version of kthread_data()
  * @task: possible kthread task in question
  *
  * @task could be a kthread task.  Return the data value specified when it
@@ -146,7 +146,7 @@ void *kthread_data(struct task_struct *task)
  * inaccessible for any reason, %NULL is returned.  This function requires
  * that @task itself is safe to dereference.
  */
-void *probe_kthread_data(struct task_struct *task)
+void *kthread_probe_data(struct task_struct *task)
 {
        struct kthread *kthread = to_kthread(task);
        void *data = NULL;
@@ -244,33 +244,10 @@ static void create_kthread(struct kthread_create_info *create)
        }
 }
 
-/**
- * kthread_create_on_node - create a kthread.
- * @threadfn: the function to run until signal_pending(current).
- * @data: data ptr for @threadfn.
- * @node: task and thread structures for the thread are allocated on this node
- * @namefmt: printf-style name for the thread.
- *
- * Description: This helper function creates and names a kernel
- * thread.  The thread will be stopped: use wake_up_process() to start
- * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
- * is affine to all CPUs.
- *
- * If thread is going to be bound on a particular cpu, give its node
- * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
- * When woken, the thread will run @threadfn() with @data as its
- * argument. @threadfn() can either call do_exit() directly if it is a
- * standalone thread for which no one will call kthread_stop(), or
- * return when 'kthread_should_stop()' is true (which means
- * kthread_stop() has been called).  The return value should be zero
- * or a negative error number; it will be passed to kthread_stop().
- *
- * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
- */
-struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
-                                          void *data, int node,
-                                          const char namefmt[],
-                                          ...)
+static struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
+                                                   void *data, int node,
+                                                   const char namefmt[],
+                                                   va_list args)
 {
        DECLARE_COMPLETION_ONSTACK(done);
        struct task_struct *task;
@@ -311,11 +288,8 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
        task = create->result;
        if (!IS_ERR(task)) {
                static const struct sched_param param = { .sched_priority = 0 };
-               va_list args;
 
-               va_start(args, namefmt);
                vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
-               va_end(args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
@@ -326,6 +300,44 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
        kfree(create);
        return task;
 }
+
+/**
+ * kthread_create_on_node - create a kthread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @node: task and thread structures for the thread are allocated on this node
+ * @namefmt: printf-style name for the thread.
+ *
+ * Description: This helper function creates and names a kernel
+ * thread.  The thread will be stopped: use wake_up_process() to start
+ * it.  See also kthread_run().  The new thread has SCHED_NORMAL policy and
+ * is affine to all CPUs.
+ *
+ * If thread is going to be bound on a particular cpu, give its node
+ * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
+ * When woken, the thread will run @threadfn() with @data as its
+ * argument. @threadfn() can either call do_exit() directly if it is a
+ * standalone thread for which no one will call kthread_stop(), or
+ * return when 'kthread_should_stop()' is true (which means
+ * kthread_stop() has been called).  The return value should be zero
+ * or a negative error number; it will be passed to kthread_stop().
+ *
+ * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
+ */
+struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
+                                          void *data, int node,
+                                          const char namefmt[],
+                                          ...)
+{
+       struct task_struct *task;
+       va_list args;
+
+       va_start(args, namefmt);
+       task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
+       va_end(args);
+
+       return task;
+}
 EXPORT_SYMBOL(kthread_create_on_node);
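
A minimal usage sketch of the calling convention documented above (the thread
function and names are illustrative, not from the patch):

    #include <linux/kthread.h>
    #include <linux/numa.h>

    static int my_threadfn(void *data)
    {
            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);
            return 0;       /* this value is passed back to kthread_stop() */
    }

    static struct task_struct *start_example_thread(void *data)
    {
            struct task_struct *task;

            task = kthread_create_on_node(my_threadfn, data, NUMA_NO_NODE,
                                          "example/%d", 0);
            if (!IS_ERR(task))
                    wake_up_process(task);  /* threads are created stopped */
            return task;
    }
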
 
 static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, long state)
@@ -390,10 +402,10 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                   cpu);
        if (IS_ERR(p))
                return p;
+       kthread_bind(p, cpu);
+       /* CPU hotplug needs to bind it once again when unparking the thread. */
        set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
-       /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
-       kthread_park(p);
        return p;
 }
 
@@ -407,6 +419,10 @@ static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
         * which might be about to be cleared.
         */
        if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+               /*
+                * Newly created kthread was parked when the CPU was offline.
+                * The binding was lost and we need to set it again.
+                */
                if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
                        __kthread_bind(k, kthread->cpu, TASK_PARKED);
                wake_up_state(k, TASK_PARKED);
@@ -540,39 +556,48 @@ int kthreadd(void *unused)
        return 0;
 }
 
-void __init_kthread_worker(struct kthread_worker *worker,
+void __kthread_init_worker(struct kthread_worker *worker,
                                const char *name,
                                struct lock_class_key *key)
 {
+       memset(worker, 0, sizeof(struct kthread_worker));
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
-       worker->task = NULL;
+       INIT_LIST_HEAD(&worker->delayed_work_list);
 }
-EXPORT_SYMBOL_GPL(__init_kthread_worker);
+EXPORT_SYMBOL_GPL(__kthread_init_worker);
 
 /**
  * kthread_worker_fn - kthread function to process kthread_worker
  * @worker_ptr: pointer to initialized kthread_worker
  *
- * This function can be used as @threadfn to kthread_create() or
- * kthread_run() with @worker_ptr argument pointing to an initialized
- * kthread_worker.  The started kthread will process work_list until
- * the it is stopped with kthread_stop().  A kthread can also call
- * this function directly after extra initialization.
+ * This function implements the main cycle of a kthread worker. It processes
+ * work_list until it is stopped with kthread_stop(). It sleeps when the queue
+ * is empty.
+ *
+ * The works must not keep any locks held, nor leave preemption or interrupts
+ * disabled, when they finish. A safe point for freezing is provided after
+ * one work finishes and before the next one is started.
  *
- * Different kthreads can be used for the same kthread_worker as long
- * as there's only one kthread attached to it at any given time.  A
- * kthread_worker without an attached kthread simply collects queued
- * kthread_works.
+ * Also, the works must not be handled by more than one worker at the same
+ * time; see also kthread_queue_work().
  */
 int kthread_worker_fn(void *worker_ptr)
 {
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;
 
-       WARN_ON(worker->task);
+       /*
+        * FIXME: Update the check and remove the assignment when all kthread
+        * worker users are created using kthread_create_worker*() functions.
+        */
+       WARN_ON(worker->task && worker->task != current);
        worker->task = current;
+
+       if (worker->flags & KTW_FREEZABLE)
+               set_freezable();
+
 repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */
 
@@ -605,12 +630,131 @@ repeat:
 }
 EXPORT_SYMBOL_GPL(kthread_worker_fn);
 
-/* insert @work before @pos in @worker */
-static void insert_kthread_work(struct kthread_worker *worker,
-                              struct kthread_work *work,
-                              struct list_head *pos)
+static struct kthread_worker *
+__kthread_create_worker(int cpu, unsigned int flags,
+                       const char namefmt[], va_list args)
+{
+       struct kthread_worker *worker;
+       struct task_struct *task;
+
+       worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+       if (!worker)
+               return ERR_PTR(-ENOMEM);
+
+       kthread_init_worker(worker);
+
+       if (cpu >= 0) {
+               char name[TASK_COMM_LEN];
+
+               /*
+                * kthread_create_worker_on_cpu() allows passing a generic
+                * namefmt, in contrast with kthread_create_on_cpu(). We need
+                * to format it here.
+                */
+               vsnprintf(name, sizeof(name), namefmt, args);
+               task = kthread_create_on_cpu(kthread_worker_fn, worker,
+                                            cpu, name);
+       } else {
+               task = __kthread_create_on_node(kthread_worker_fn, worker,
+                                               -1, namefmt, args);
+       }
+
+       if (IS_ERR(task))
+               goto fail_task;
+
+       worker->flags = flags;
+       worker->task = task;
+       wake_up_process(task);
+       return worker;
+
+fail_task:
+       kfree(worker);
+       return ERR_CAST(task);
+}
+
+/**
+ * kthread_create_worker - create a kthread worker
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the kthread worker (task).
+ *
+ * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
+ * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
+ * when the worker was SIGKILLed.
+ */
+struct kthread_worker *
+kthread_create_worker(unsigned int flags, const char namefmt[], ...)
+{
+       struct kthread_worker *worker;
+       va_list args;
+
+       va_start(args, namefmt);
+       worker = __kthread_create_worker(-1, flags, namefmt, args);
+       va_end(args);
+
+       return worker;
+}
+EXPORT_SYMBOL(kthread_create_worker);
+
+/**
+ * kthread_create_worker_on_cpu - create a kthread worker and bind it
+ *     to a given CPU and the associated NUMA node.
+ * @cpu: CPU number
+ * @flags: flags modifying the default behavior of the worker
+ * @namefmt: printf-style name for the kthread worker (task).
+ *
+ * Use a valid CPU number if you want to bind the kthread worker
+ * to the given CPU and the associated NUMA node.
+ *
+ * It is good practice to also include the CPU number in the worker name.
+ * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
+ *
+ * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
+ * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
+ * when the worker was SIGKILLed.
+ */
+struct kthread_worker *
+kthread_create_worker_on_cpu(int cpu, unsigned int flags,
+                            const char namefmt[], ...)
+{
+       struct kthread_worker *worker;
+       va_list args;
+
+       va_start(args, namefmt);
+       worker = __kthread_create_worker(cpu, flags, namefmt, args);
+       va_end(args);
+
+       return worker;
+}
+EXPORT_SYMBOL(kthread_create_worker_on_cpu);
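
A short sketch of the worker-creation API added above (the work function and
names are made up for illustration):

    #include <linux/kthread.h>
    #include <linux/printk.h>

    static void my_work_fn(struct kthread_work *work)
    {
            pr_info("kthread work executed\n");
    }

    static DEFINE_KTHREAD_WORK(my_work, my_work_fn);

    static struct kthread_worker *start_example_worker(void)
    {
            struct kthread_worker *worker;

            /* flags == 0 here; KTW_FREEZABLE would make it freezable */
            worker = kthread_create_worker(0, "example_worker");
            if (!IS_ERR(worker))
                    kthread_queue_work(worker, &my_work);
            return worker;
    }
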
+
+/*
+ * Returns true when the work could not be queued at the moment.
+ * This happens when it is already pending in a worker list
+ * or when it is being cancelled.
+ */
+static inline bool queuing_blocked(struct kthread_worker *worker,
+                                  struct kthread_work *work)
+{
+       lockdep_assert_held(&worker->lock);
+
+       return !list_empty(&work->node) || work->canceling;
+}
+
+static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
+                                            struct kthread_work *work)
 {
        lockdep_assert_held(&worker->lock);
+       WARN_ON_ONCE(!list_empty(&work->node));
+       /* Do not use a work with >1 worker, see kthread_queue_work() */
+       WARN_ON_ONCE(work->worker && work->worker != worker);
+}
+
+/* insert @work before @pos in @worker */
+static void kthread_insert_work(struct kthread_worker *worker,
+                               struct kthread_work *work,
+                               struct list_head *pos)
+{
+       kthread_insert_work_sanity_check(worker, work);
 
        list_add_tail(&work->node, pos);
        work->worker = worker;
@@ -619,29 +763,133 @@ static void insert_kthread_work(struct kthread_worker *worker,
 }
 
 /**
- * queue_kthread_work - queue a kthread_work
+ * kthread_queue_work - queue a kthread_work
  * @worker: target kthread_worker
  * @work: kthread_work to queue
  *
  * Queue @work to work processor @task for async execution.  @task
  * must have been created with kthread_worker_create().  Returns %true
  * if @work was successfully queued, %false if it was already pending.
+ *
+ * Reinitialize the work if it needs to be used by another worker.
+ * For example, when the worker was stopped and started again.
  */
-bool queue_kthread_work(struct kthread_worker *worker,
+bool kthread_queue_work(struct kthread_worker *worker,
                        struct kthread_work *work)
 {
        bool ret = false;
        unsigned long flags;
 
        spin_lock_irqsave(&worker->lock, flags);
-       if (list_empty(&work->node)) {
-               insert_kthread_work(worker, work, &worker->work_list);
+       if (!queuing_blocked(worker, work)) {
+               kthread_insert_work(worker, work, &worker->work_list);
+               ret = true;
+       }
+       spin_unlock_irqrestore(&worker->lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kthread_queue_work);
+
+/**
+ * kthread_delayed_work_timer_fn - callback that queues the associated kthread
+ *     delayed work when the timer expires.
+ * @__data: pointer to the data associated with the timer
+ *
+ * The prototype of the function is defined by struct timer_list.
+ * It is called from an irqsafe timer with IRQs already disabled.
+ */
+void kthread_delayed_work_timer_fn(unsigned long __data)
+{
+       struct kthread_delayed_work *dwork =
+               (struct kthread_delayed_work *)__data;
+       struct kthread_work *work = &dwork->work;
+       struct kthread_worker *worker = work->worker;
+
+       /*
+        * This might happen when a pending work is reinitialized.
+        * It means that the work is being used in a wrong way.
+        */
+       if (WARN_ON_ONCE(!worker))
+               return;
+
+       spin_lock(&worker->lock);
+       /* Work must not be used with >1 worker, see kthread_queue_work(). */
+       WARN_ON_ONCE(work->worker != worker);
+
+       /* Move the work from worker->delayed_work_list. */
+       WARN_ON_ONCE(list_empty(&work->node));
+       list_del_init(&work->node);
+       kthread_insert_work(worker, work, &worker->work_list);
+
+       spin_unlock(&worker->lock);
+}
+EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
+
+void __kthread_queue_delayed_work(struct kthread_worker *worker,
+                                 struct kthread_delayed_work *dwork,
+                                 unsigned long delay)
+{
+       struct timer_list *timer = &dwork->timer;
+       struct kthread_work *work = &dwork->work;
+
+       WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn ||
+                    timer->data != (unsigned long)dwork);
+
+       /*
+        * If @delay is 0, queue @dwork->work immediately.  This is for
+        * both optimization and correctness.  The earliest @timer can
+        * expire is on the closest next tick, and delayed_work users depend
+        * on there being no such delay when @delay is 0.
+        */
+       if (!delay) {
+               kthread_insert_work(worker, work, &worker->work_list);
+               return;
+       }
+
+       /* Be paranoid and try to detect possible races already now. */
+       kthread_insert_work_sanity_check(worker, work);
+
+       list_add(&work->node, &worker->delayed_work_list);
+       work->worker = worker;
+       timer_stats_timer_set_start_info(&dwork->timer);
+       timer->expires = jiffies + delay;
+       add_timer(timer);
+}
+
+/**
+ * kthread_queue_delayed_work - queue the associated kthread work
+ *     after a delay.
+ * @worker: target kthread_worker
+ * @dwork: kthread_delayed_work to queue
+ * @delay: number of jiffies to wait before queuing
+ *
+ * If the work is not already pending, it starts a timer that will queue
+ * the work after the given @delay. If @delay is zero, it queues the
+ * work immediately.
+ *
+ * Return: %false if @work was already pending, meaning that either
+ * the timer was running or the work was queued. It returns %true
+ * otherwise.
+ */
+bool kthread_queue_delayed_work(struct kthread_worker *worker,
+                               struct kthread_delayed_work *dwork,
+                               unsigned long delay)
+{
+       struct kthread_work *work = &dwork->work;
+       unsigned long flags;
+       bool ret = false;
+
+       spin_lock_irqsave(&worker->lock, flags);
+
+       if (!queuing_blocked(worker, work)) {
+               __kthread_queue_delayed_work(worker, dwork, delay);
                ret = true;
        }
+
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
 }
-EXPORT_SYMBOL_GPL(queue_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
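
Continuing the previous sketch (same includes), the delayed variant:

    #include <linux/jiffies.h>

    static void my_dwork_fn(struct kthread_work *work)
    {
            pr_info("delayed kthread work ran\n");
    }

    static DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_dwork_fn);

    static void arm_example_timeout(struct kthread_worker *worker)
    {
            /* queue my_dwork roughly one second from now */
            kthread_queue_delayed_work(worker, &my_dwork,
                                       msecs_to_jiffies(1000));
    }
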
 
 struct kthread_flush_work {
        struct kthread_work     work;
@@ -656,12 +904,12 @@ static void kthread_flush_work_fn(struct kthread_work *work)
 }
 
 /**
- * flush_kthread_work - flush a kthread_work
+ * kthread_flush_work - flush a kthread_work
  * @work: work to flush
  *
  * If @work is queued or executing, wait for it to finish execution.
  */
-void flush_kthread_work(struct kthread_work *work)
+void kthread_flush_work(struct kthread_work *work)
 {
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
@@ -670,21 +918,19 @@ void flush_kthread_work(struct kthread_work *work)
        struct kthread_worker *worker;
        bool noop = false;
 
-retry:
        worker = work->worker;
        if (!worker)
                return;
 
        spin_lock_irq(&worker->lock);
-       if (work->worker != worker) {
-               spin_unlock_irq(&worker->lock);
-               goto retry;
-       }
+       /* Work must not be used with >1 worker, see kthread_queue_work(). */
+       WARN_ON_ONCE(work->worker != worker);
 
        if (!list_empty(&work->node))
-               insert_kthread_work(worker, &fwork.work, work->node.next);
+               kthread_insert_work(worker, &fwork.work, work->node.next);
        else if (worker->current_work == work)
-               insert_kthread_work(worker, &fwork.work, worker->work_list.next);
+               kthread_insert_work(worker, &fwork.work,
+                                   worker->work_list.next);
        else
                noop = true;
 
@@ -693,23 +939,214 @@ retry:
        if (!noop)
                wait_for_completion(&fwork.done);
 }
-EXPORT_SYMBOL_GPL(flush_kthread_work);
+EXPORT_SYMBOL_GPL(kthread_flush_work);
+
+/*
+ * This function removes the work from the worker queue. It also makes sure
+ * that the work won't get queued again later via the delayed work's timer.
+ *
+ * The work might still be in use when this function finishes. See the
+ * current_work processed by the worker.
+ *
+ * Return: %true if @work was pending and successfully canceled,
+ *     %false if @work was not pending
+ */
+static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
+                                 unsigned long *flags)
+{
+       /* Try to cancel the timer if exists. */
+       if (is_dwork) {
+               struct kthread_delayed_work *dwork =
+                       container_of(work, struct kthread_delayed_work, work);
+               struct kthread_worker *worker = work->worker;
+
+               /*
+                * del_timer_sync() must be called to make sure that the timer
+                * callback is not running. The lock must be temporarily released
+                * to avoid a deadlock with the callback. In the meantime,
+                * any queuing is blocked by setting the canceling counter.
+                */
+               work->canceling++;
+               spin_unlock_irqrestore(&worker->lock, *flags);
+               del_timer_sync(&dwork->timer);
+               spin_lock_irqsave(&worker->lock, *flags);
+               work->canceling--;
+       }
+
+       /*
+        * Try to remove the work from a worker list. It might either
+        * be from worker->work_list or from worker->delayed_work_list.
+        */
+       if (!list_empty(&work->node)) {
+               list_del_init(&work->node);
+               return true;
+       }
+
+       return false;
+}
+
+/**
+ * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
+ * @worker: kthread worker to use
+ * @dwork: kthread delayed work to queue
+ * @delay: number of jiffies to wait before queuing
+ *
+ * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
+ * modify @dwork's timer so that it expires after @delay. If @delay is zero,
+ * @work is guaranteed to be queued immediately.
+ *
+ * Return: %true if @dwork was pending and its timer was modified,
+ * %false otherwise.
+ *
+ * A special case is when the work is being canceled in parallel.
+ * It might be caused either by the real kthread_cancel_delayed_work_sync()
+ * or yet another kthread_mod_delayed_work() call. We let the other command
+ * win and return %false here. The caller is supposed to synchronize these
+ * operations in a reasonable way.
+ *
+ * This function is safe to call from any context including IRQ handler.
+ * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
+ * for details.
+ */
+bool kthread_mod_delayed_work(struct kthread_worker *worker,
+                             struct kthread_delayed_work *dwork,
+                             unsigned long delay)
+{
+       struct kthread_work *work = &dwork->work;
+       unsigned long flags;
+       int ret = false;
+
+       spin_lock_irqsave(&worker->lock, flags);
+
+       /* Do not bother with canceling when never queued. */
+       if (!work->worker)
+               goto fast_queue;
+
+       /* Work must not be used with >1 worker, see kthread_queue_work() */
+       WARN_ON_ONCE(work->worker != worker);
+
+       /* Do not fight with another command that is canceling this work. */
+       if (work->canceling)
+               goto out;
+
+       ret = __kthread_cancel_work(work, true, &flags);
+fast_queue:
+       __kthread_queue_delayed_work(worker, dwork, delay);
+out:
+       spin_unlock_irqrestore(&worker->lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
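
A sketch of the watchdog-style re-arming pattern this enables, reusing
my_dwork from the previous sketch:

    static void kick_example_timeout(struct kthread_worker *worker)
    {
            /*
             * Push a pending timeout further out, or queue it fresh if
             * it was idle; safe from any context, including IRQ handlers.
             */
            kthread_mod_delayed_work(worker, &my_dwork,
                                     msecs_to_jiffies(1000));
    }
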
+
+static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
+{
+       struct kthread_worker *worker = work->worker;
+       unsigned long flags;
+       int ret = false;
+
+       if (!worker)
+               goto out;
+
+       spin_lock_irqsave(&worker->lock, flags);
+       /* Work must not be used with >1 worker, see kthread_queue_work(). */
+       WARN_ON_ONCE(work->worker != worker);
+
+       ret = __kthread_cancel_work(work, is_dwork, &flags);
+
+       if (worker->current_work != work)
+               goto out_fast;
+
+       /*
+        * The work is in progress and we need to wait with the lock released.
+        * In the meantime, block any queuing by setting the canceling counter.
+        */
+       work->canceling++;
+       spin_unlock_irqrestore(&worker->lock, flags);
+       kthread_flush_work(work);
+       spin_lock_irqsave(&worker->lock, flags);
+       work->canceling--;
+
+out_fast:
+       spin_unlock_irqrestore(&worker->lock, flags);
+out:
+       return ret;
+}
+
+/**
+ * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
+ * @work: the kthread work to cancel
+ *
+ * Cancel @work and wait for its execution to finish.  This function
+ * can be used even if the work re-queues itself. On return from this
+ * function, @work is guaranteed to be not pending or executing on any CPU.
+ *
+ * kthread_cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed works. Use kthread_cancel_delayed_work_sync() instead.
+ *
+ * The caller must ensure that the worker on which @work was last
+ * queued can't be destroyed before this function returns.
+ *
+ * Return: %true if @work was pending, %false otherwise.
+ */
+bool kthread_cancel_work_sync(struct kthread_work *work)
+{
+       return __kthread_cancel_work_sync(work, false);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
+
+/**
+ * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
+ *     wait for it to finish.
+ * @dwork: the kthread delayed work to cancel
+ *
+ * This is kthread_cancel_work_sync() for delayed works.
+ *
+ * Return: %true if @dwork was pending, %false otherwise.
+ */
+bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
+{
+       return __kthread_cancel_work_sync(&dwork->work, true);
+}
+EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
 
 /**
- * flush_kthread_worker - flush all current works on a kthread_worker
+ * kthread_flush_worker - flush all current works on a kthread_worker
  * @worker: worker to flush
  *
  * Wait until all currently executing or pending works on @worker are
  * finished.
  */
-void flush_kthread_worker(struct kthread_worker *worker)
+void kthread_flush_worker(struct kthread_worker *worker)
 {
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };
 
-       queue_kthread_work(worker, &fwork.work);
+       kthread_queue_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
 }
-EXPORT_SYMBOL_GPL(flush_kthread_worker);
+EXPORT_SYMBOL_GPL(kthread_flush_worker);
+
+/**
+ * kthread_destroy_worker - destroy a kthread worker
+ * @worker: worker to be destroyed
+ *
+ * Flush and destroy @worker.  The simple flush is enough because the kthread
+ * worker API is used only in trivial scenarios.  No multi-step state
+ * machines are needed.
+ */
+void kthread_destroy_worker(struct kthread_worker *worker)
+{
+       struct task_struct *task;
+
+       task = worker->task;
+       if (WARN_ON(!task))
+               return;
+
+       kthread_flush_worker(worker);
+       kthread_stop(task);
+       WARN_ON(!list_empty(&worker->work_list));
+       kfree(worker);
+}
+EXPORT_SYMBOL(kthread_destroy_worker);
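
Tying the sketches together, a plausible teardown sequence using the cancel
and destroy helpers above:

    static void stop_example_worker(struct kthread_worker *worker)
    {
            kthread_cancel_delayed_work_sync(&my_dwork); /* timer + execution */
            kthread_cancel_work_sync(&my_work);
            kthread_destroy_worker(worker);  /* flushes, stops the task, frees */
    }
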
index ca8cea1ef6737dfe4c16a62f96d3d0569f181c41..e6480e20379e4d0ee4fb51284364d6e7f7f1e25e 100644 (file)
@@ -71,6 +71,32 @@ void __weak nmi_panic_self_stop(struct pt_regs *regs)
        panic_smp_self_stop();
 }
 
+/*
+ * Stop other CPUs in panic.  Architecture-dependent code may override this
+ * with a more suitable version.  For example, if the architecture supports
+ * crash dump, it should save registers of each stopped CPU and disable
+ * per-CPU features such as virtualization extensions.
+ */
+void __weak crash_smp_send_stop(void)
+{
+       static int cpus_stopped;
+
+       /*
+        * This function can be called twice in the panic path, but we
+        * must execute it only once.
+        */
+       if (cpus_stopped)
+               return;
+
+       /*
+        * Note smp_send_stop is the usual smp shutdown function, which
+        * unfortunately means it may not be hardened to work in a panic
+        * situation.
+        */
+       smp_send_stop();
+       cpus_stopped = 1;
+}
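
For illustration, a hypothetical architecture override of this weak hook
(arch_save_remote_cpu_state() is invented for the sketch; real versions live
in the arch code touched elsewhere in this merge):

    void crash_smp_send_stop(void)
    {
            static int cpus_stopped;

            if (cpus_stopped)
                    return;

            /* hypothetical: capture remote CPU state for the crash dump */
            arch_save_remote_cpu_state();
            smp_send_stop();
            cpus_stopped = 1;
    }
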
+
 atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);
 
 /*
@@ -164,14 +190,21 @@ void panic(const char *fmt, ...)
        if (!_crash_kexec_post_notifiers) {
                printk_nmi_flush_on_panic();
                __crash_kexec(NULL);
-       }
 
-       /*
-        * Note smp_send_stop is the usual smp shutdown function, which
-        * unfortunately means it may not be hardened to work in a panic
-        * situation.
-        */
-       smp_send_stop();
+               /*
+                * Note smp_send_stop is the usual smp shutdown function, which
+                * unfortunately means it may not be hardened to work in a
+                * panic situation.
+                */
+               smp_send_stop();
+       } else {
+               /*
+                * If we want to do crash dump after notifier calls and
+                * kmsg_dump, we will need architecture-dependent extra
+                * work in addition to stopping other CPUs.
+                */
+               crash_smp_send_stop();
+       }
 
        /*
         * Run any panic handlers, including those that might need to
index 1d3b7665d0be0223343530bcfc940bfa3e525c28..2a99027312a6af6773027e20029752efddc418e3 100644 (file)
@@ -73,6 +73,8 @@ void __ptrace_unlink(struct task_struct *child)
 {
        BUG_ON(!child->ptrace);
 
+       clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+
        child->parent = child->real_parent;
        list_del_init(&child->ptrace_entry);
 
@@ -489,7 +491,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
 
        /* Architecture-specific hardware disable .. */
        ptrace_disable(child);
-       clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
 
        write_lock_irq(&tasklist_lock);
        /*
index 9988f5cc2d464548134e1f57d391e3b53fad785e..da79a109dbebc6f7d4cf725182b05365a946614b 100644 (file)
@@ -328,13 +328,15 @@ static struct rchan_callbacks default_channel_callbacks = {
 
 /**
  *     wakeup_readers - wake up readers waiting on a channel
- *     @data: contains the channel buffer
+ *     @work: contains the channel buffer
  *
- *     This is the timer function used to defer reader waking.
+ *     This is the function used to defer reader waking.
  */
-static void wakeup_readers(unsigned long data)
+static void wakeup_readers(struct irq_work *work)
 {
-       struct rchan_buf *buf = (struct rchan_buf *)data;
+       struct rchan_buf *buf;
+
+       buf = container_of(work, struct rchan_buf, wakeup_work);
        wake_up_interruptible(&buf->read_wait);
 }
 
@@ -352,9 +354,10 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
        if (init) {
                init_waitqueue_head(&buf->read_wait);
                kref_init(&buf->kref);
-               setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
-       } else
-               del_timer_sync(&buf->timer);
+               init_irq_work(&buf->wakeup_work, wakeup_readers);
+       } else {
+               irq_work_sync(&buf->wakeup_work);
+       }
 
        buf->subbufs_produced = 0;
        buf->subbufs_consumed = 0;
@@ -487,7 +490,7 @@ free_buf:
 static void relay_close_buf(struct rchan_buf *buf)
 {
        buf->finalized = 1;
-       del_timer_sync(&buf->timer);
+       irq_work_sync(&buf->wakeup_work);
        buf->chan->cb->remove_buf_file(buf->dentry);
        kref_put(&buf->kref, relay_remove_buf);
 }
@@ -754,14 +757,15 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
                        buf->early_bytes += buf->chan->subbuf_size -
                                            buf->padding[old_subbuf];
                smp_mb();
-               if (waitqueue_active(&buf->read_wait))
+               if (waitqueue_active(&buf->read_wait)) {
                        /*
                         * Calling wake_up_interruptible() from here
                         * will deadlock if we happen to be logging
                         * from the scheduler (trying to re-grab
                         * rq->lock), so defer it.
                         */
-                       mod_timer(&buf->timer, jiffies + 1);
+                       irq_work_queue(&buf->wakeup_work);
+               }
        }
 
        old = buf->data;
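
The timer-to-irq_work conversion above follows a general deferral pattern;
a condensed, self-contained sketch (struct and names illustrative):

    #include <linux/irq_work.h>
    #include <linux/kernel.h>
    #include <linux/wait.h>

    struct my_chan_buf {
            struct irq_work wakeup_work;
            wait_queue_head_t read_wait;
    };

    static void my_wakeup(struct irq_work *work)
    {
            struct my_chan_buf *buf =
                    container_of(work, struct my_chan_buf, wakeup_work);

            wake_up_interruptible(&buf->read_wait);
    }

    static void my_buf_init(struct my_chan_buf *buf)
    {
            init_waitqueue_head(&buf->read_wait);
            init_irq_work(&buf->wakeup_work, my_wakeup);
    }

    /* from a context that may hold rq->lock, defer the wakeup */
    static void my_buf_notify(struct my_chan_buf *buf)
    {
            irq_work_queue(&buf->wakeup_work);
    }
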
index 13bc43d1fb227f8ee0c55a411460200a2ac3b067..4a5c6e73ecd41e7107a89b098f4eea9fefc647f1 100644 (file)
@@ -186,6 +186,11 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
                kfree(td);
                return PTR_ERR(tsk);
        }
+       /*
+        * Park the thread so that it can start right on the CPU
+        * when it becomes available.
+        */
+       kthread_park(tsk);
        get_task_struct(tsk);
        *per_cpu_ptr(ht->store, cpu) = tsk;
        if (ht->create) {
index bd81f039027782b923b99be667caa12142c2bf5d..479d840db286efa01febf1dddf56ec0b28a29731 100644 (file)
@@ -4261,7 +4261,7 @@ void print_worker_info(const char *log_lvl, struct task_struct *task)
         * This function is called without any synchronization and @task
         * could be in any state.  Be careful with dereferences.
         */
-       worker = probe_kthread_data(task);
+       worker = kthread_probe_data(task);
 
        /*
         * Carefully copy the associated workqueue's workfn and name.  Keep
index f3ca8c0ab634e04f98d959ef4b2759f9a5af472f..50144a3aeebdb8dfcadca9ec735a1b9012fc518d 100644 (file)
@@ -180,6 +180,7 @@ obj-$(CONFIG_IRQ_POLL) += irq_poll.o
 
 obj-$(CONFIG_STACKDEPOT) += stackdepot.o
 KASAN_SANITIZE_stackdepot.o := n
+KCOV_INSTRUMENT_stackdepot.o := n
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
               fdt_empty_tree.o
index eca88087fa8adc0f451dc99aa19e4c63067031b7..0b66f0e5eb6bb172ae240fe59dd1eec0d9065493 100644 (file)
@@ -496,6 +496,11 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
  * ranges.  Consecutively set bits are shown as two hyphen-separated
  * decimal numbers, the smallest and largest bit numbers set in
  * the range.
+ * Optionally, each range can be postfixed to denote that only parts of it
+ * should be set. The range will be divided into groups of a specific size,
+ * and from each group only the given number of bits will be set.
+ * Syntax: range:used_size/group_size
+ * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
  *
  * Returns 0 on success, -errno on invalid input strings.
  * Error values:
@@ -507,16 +512,20 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
                int is_user, unsigned long *maskp,
                int nmaskbits)
 {
-       unsigned a, b;
+       unsigned int a, b, old_a, old_b;
+       unsigned int group_size, used_size;
        int c, old_c, totaldigits, ndigits;
        const char __user __force *ubuf = (const char __user __force *)buf;
-       int at_start, in_range;
+       int at_start, in_range, in_partial_range;
 
        totaldigits = c = 0;
+       old_a = old_b = 0;
+       group_size = used_size = 0;
        bitmap_zero(maskp, nmaskbits);
        do {
                at_start = 1;
                in_range = 0;
+               in_partial_range = 0;
                a = b = 0;
                ndigits = totaldigits;
 
@@ -547,6 +556,24 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
                        if ((totaldigits != ndigits) && isspace(old_c))
                                return -EINVAL;
 
+                       if (c == '/') {
+                               used_size = a;
+                               at_start = 1;
+                               in_range = 0;
+                               a = b = 0;
+                               continue;
+                       }
+
+                       if (c == ':') {
+                               old_a = a;
+                               old_b = b;
+                               at_start = 1;
+                               in_range = 0;
+                               in_partial_range = 1;
+                               a = b = 0;
+                               continue;
+                       }
+
                        if (c == '-') {
                                if (at_start || in_range)
                                        return -EINVAL;
@@ -567,15 +594,30 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
                }
                if (ndigits == totaldigits)
                        continue;
+               if (in_partial_range) {
+                       group_size = a;
+                       a = old_a;
+                       b = old_b;
+                       old_a = old_b = 0;
+               }
                /* if no digit is after '-', it's wrong*/
                if (at_start && in_range)
                        return -EINVAL;
-               if (!(a <= b))
+               if (!(a <= b) || !(used_size <= group_size))
                        return -EINVAL;
                if (b >= nmaskbits)
                        return -ERANGE;
                while (a <= b) {
-                       set_bit(a, maskp);
+                       if (in_partial_range) {
+                               static int pos_in_group = 1;
+
+                               if (pos_in_group <= used_size)
+                                       set_bit(a, maskp);
+
+                               if (a == b || ++pos_in_group > group_size)
+                                       pos_in_group = 1;
+                       } else
+                               set_bit(a, maskp);
                        a++;
                }
        } while (buflen && c == ',');
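
A sketch exercising the new group syntax through the public wrapper
bitmap_parselist() (the bitmap size is illustrative):

    #include <linux/bitmap.h>

    static int parse_example(void)
    {
            DECLARE_BITMAP(mask, 1024);

            /* on success, bits 0,1,256,257,512,513,768,769 are set */
            return bitmap_parselist("0-1023:2/256", mask, 1024);
    }
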
index d8a5cf66c316fe21eaecee8d973deac1eb3bd5d7..b8e2080c1a47a24a14c9618cd0dac5e4da0c0c67 100644 (file)
@@ -48,11 +48,9 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
 {
        unsigned long long res;
        unsigned int rv;
-       int overflow;
 
        res = 0;
        rv = 0;
-       overflow = 0;
        while (*s) {
                unsigned int val;
 
@@ -71,15 +69,13 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
                 */
                if (unlikely(res & (~0ull << 60))) {
                        if (res > div_u64(ULLONG_MAX - val, base))
-                               overflow = 1;
+                               rv |= KSTRTOX_OVERFLOW;
                }
                res = res * base + val;
                rv++;
                s++;
        }
        *p = res;
-       if (overflow)
-               rv |= KSTRTOX_OVERFLOW;
        return rv;
 }
 
index 9c5fe81104135364bca9f2b0da47f4e2a1ed51fc..7e35fc450c5bb780121cf3e6df0a87abaa740f5b 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/compiler.h>
 #include <linux/export.h>
 #include <linux/kasan-checks.h>
+#include <linux/thread_info.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -111,6 +112,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
                long retval;
 
                kasan_check_write(dst, count);
+               check_object_size(dst, count, false);
                user_access_begin();
                retval = do_strncpy_from_user(dst, src, count, max);
                user_access_end();
index a869f84f44d38a8905619b71052bfadc0de81dea..e8a55a3c9febae80fab4627bc8f0846f4326c56a 100644 (file)
@@ -155,7 +155,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
 {
        unsigned long cursor, end;
 
-       kmemleak_free_part(__va(physaddr), size);
+       kmemleak_free_part_phys(physaddr, size);
 
        cursor = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);
@@ -399,7 +399,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 {
        unsigned long start, end;
 
-       kmemleak_free_part(__va(physaddr), size);
+       kmemleak_free_part_phys(physaddr, size);
 
        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);
@@ -420,7 +420,7 @@ void __init free_bootmem(unsigned long physaddr, unsigned long size)
 {
        unsigned long start, end;
 
-       kmemleak_free_part(__va(physaddr), size);
+       kmemleak_free_part_phys(physaddr, size);
 
        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);
index bd0e1412475eb872dd354999d3120a55cce925be..384c2cb51b56bf75ab2c132d0087e3757a71c276 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -336,7 +336,7 @@ int __init cma_declare_contiguous(phys_addr_t base,
                 * kmemleak scans/reads tracked objects for pointers to other
                 * objects but this address isn't mapped and accessible
                 */
-               kmemleak_ignore(phys_to_virt(addr));
+               kmemleak_ignore_phys(addr);
                base = addr;
        }
 
index 086292f7c59d1a69881068758150c2140feb8253..a5e453cf05c499cf5c7eeb9b66ce14936d4494fd 100644 (file)
@@ -90,6 +90,8 @@
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/bootmem.h>
+#include <linux/pfn.h>
 #include <linux/mmzone.h>
 #include <linux/slab.h>
 #include <linux/thread_info.h>
@@ -1121,6 +1123,51 @@ void __ref kmemleak_no_scan(const void *ptr)
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
+/**
+ * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
+ *                      address argument
+ */
+void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
+                              gfp_t gfp)
+{
+       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_alloc(__va(phys), size, min_count, gfp);
+}
+EXPORT_SYMBOL(kmemleak_alloc_phys);
+
+/**
+ * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
+ *                          physical address argument
+ */
+void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
+{
+       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_free_part(__va(phys), size);
+}
+EXPORT_SYMBOL(kmemleak_free_part_phys);
+
+/**
+ * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
+ *                         address argument
+ */
+void __ref kmemleak_not_leak_phys(phys_addr_t phys)
+{
+       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_not_leak(__va(phys));
+}
+EXPORT_SYMBOL(kmemleak_not_leak_phys);
+
+/**
+ * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
+ *                       address argument
+ */
+void __ref kmemleak_ignore_phys(phys_addr_t phys)
+{
+       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+               kmemleak_ignore(__va(phys));
+}
+EXPORT_SYMBOL(kmemleak_ignore_phys);
+
 /*
  * Update an object's checksum and return true if it was modified.
  */
index c8dfa430342be77ad35cea98dfad746c36aa26b3..7608bc305936177f03dcf92eda29c1eb818cdc2f 100644 (file)
@@ -723,7 +723,7 @@ int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
                     (unsigned long long)base + size - 1,
                     (void *)_RET_IP_);
 
-       kmemleak_free_part(__va(base), size);
+       kmemleak_free_part_phys(base, size);
        return memblock_remove_range(&memblock.reserved, base, size);
 }
 
@@ -1152,7 +1152,7 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                 * The min_count is set to 0 so that memblock allocations are
                 * never reported as leaks.
                 */
-               kmemleak_alloc(__va(found), size, 0, 0);
+               kmemleak_alloc_phys(found, size, 0, 0);
                return found;
        }
        return 0;
@@ -1399,7 +1399,7 @@ void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
                     __func__, (u64)base, (u64)base + size - 1,
                     (void *)_RET_IP_);
-       kmemleak_free_part(__va(base), size);
+       kmemleak_free_part_phys(base, size);
        memblock_remove_range(&memblock.reserved, base, size);
 }
 
@@ -1419,7 +1419,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
        memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
                     __func__, (u64)base, (u64)base + size - 1,
                     (void *)_RET_IP_);
-       kmemleak_free_part(__va(base), size);
+       kmemleak_free_part_phys(base, size);
        cursor = PFN_UP(base);
        end = PFN_DOWN(base + size);
 
index ba609b684d7a889ee00c7edd281e8458e926e5ad..487dad610731b7746d1fd3668e98c6ede3828be7 100644 (file)
@@ -84,7 +84,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
 {
        unsigned long cursor, end;
 
-       kmemleak_free_part(__va(addr), size);
+       kmemleak_free_part_phys(addr, size);
 
        cursor = PFN_UP(addr);
        end = PFN_DOWN(addr + size);
index c68ff3dcb9266e6a17b9af14cda176df464fe55e..e49121ee55f696547ddc9774ba6c425af2d49b57 100644 (file)
@@ -20,8 +20,6 @@
 
 #include "main.h"
 
-#include <linux/kconfig.h>
-
 struct net_device;
 
 #define BATADV_DEBUGFS_SUBDIR "batman_adv"
index 206a6b346a8dbc3d38b1771093671fa37f1daf27..a8368d1c4348ce4af68e53a1a28fdd4a4772799e 100755 (executable)
@@ -54,6 +54,7 @@ my $min_conf_desc_length = 4;
 my $spelling_file = "$D/spelling.txt";
 my $codespell = 0;
 my $codespellfile = "/usr/share/codespell/dictionary.txt";
+my $conststructsfile = "$D/const_structs.checkpatch";
 my $color = 1;
 my $allow_c99_comments = 1;
 
@@ -523,7 +524,11 @@ our @mode_permission_funcs = (
        ["module_param_array_named", 5],
        ["debugfs_create_(?:file|u8|u16|u32|u64|x8|x16|x32|x64|size_t|atomic_t|bool|blob|regset32|u32_array)", 2],
        ["proc_create(?:_data|)", 2],
-       ["(?:CLASS|DEVICE|SENSOR)_ATTR", 2],
+       ["(?:CLASS|DEVICE|SENSOR|SENSOR_DEVICE|IIO_DEVICE)_ATTR", 2],
+       ["IIO_DEV_ATTR_[A-Z_]+", 1],
+       ["SENSOR_(?:DEVICE_|)ATTR_2", 2],
+       ["SENSOR_TEMPLATE(?:_2|)", 3],
+       ["__ATTR", 2],
 );
 
 #Create a search pattern for all these functions to speed up a loop below
@@ -541,6 +546,32 @@ our $mode_perms_world_writable = qr{
        0[0-7][0-7][2367]
 }x;
 
+our %mode_permission_string_types = (
+       "S_IRWXU" => 0700,
+       "S_IRUSR" => 0400,
+       "S_IWUSR" => 0200,
+       "S_IXUSR" => 0100,
+       "S_IRWXG" => 0070,
+       "S_IRGRP" => 0040,
+       "S_IWGRP" => 0020,
+       "S_IXGRP" => 0010,
+       "S_IRWXO" => 0007,
+       "S_IROTH" => 0004,
+       "S_IWOTH" => 0002,
+       "S_IXOTH" => 0001,
+       "S_IRWXUGO" => 0777,
+       "S_IRUGO" => 0444,
+       "S_IWUGO" => 0222,
+       "S_IXUGO" => 0111,
+);
+
+#Create a search pattern for all these strings to speed up a loop below
+our $mode_perms_string_search = "";
+foreach my $entry (keys %mode_permission_string_types) {
+       $mode_perms_string_search .= '|' if ($mode_perms_string_search ne "");
+       $mode_perms_string_search .= $entry;
+}
+
 our $allowed_asm_includes = qr{(?x:
        irq|
        memory|
@@ -598,6 +629,29 @@ if ($codespell) {
 
 $misspellings = join("|", sort keys %spelling_fix) if keys %spelling_fix;
 
+my $const_structs = "";
+if (open(my $conststructs, '<', $conststructsfile)) {
+       while (<$conststructs>) {
+               my $line = $_;
+
+               $line =~ s/\s*\n?$//g;
+               $line =~ s/^\s*//g;
+
+               next if ($line =~ m/^\s*#/);
+               next if ($line =~ m/^\s*$/);
+               if ($line =~ /\s/) {
+                       print("$conststructsfile: '$line' invalid - ignored\n");
+                       next;
+               }
+
+               $const_structs .= '|' if ($const_structs ne "");
+               $const_structs .= $line;
+       }
+       close($conststructsfile);
+} else {
+       warn "No structs that should be const will be found - file '$conststructsfile': $!\n";
+}
+
 sub build_types {
        my $mods = "(?x:  \n" . join("|\n  ", (@modifierList, @modifierListFile)) . "\n)";
        my $all = "(?x:  \n" . join("|\n  ", (@typeList, @typeListFile)) . "\n)";
@@ -704,6 +758,16 @@ sub seed_camelcase_file {
        }
 }
 
+sub is_maintained_obsolete {
+       my ($filename) = @_;
+
+       return 0 if (!(-e "$root/scripts/get_maintainer.pl"));
+
+       my $status = `perl $root/scripts/get_maintainer.pl --status --nom --nol --nogit --nogit-fallback -f $filename 2>&1`;
+
+       return $status =~ /obsolete/i;
+}
+
 my $camelcase_seeded = 0;
 sub seed_camelcase_includes {
        return if ($camelcase_seeded);
@@ -2289,6 +2353,10 @@ sub process {
                }
 
                if ($found_file) {
+                       if (is_maintained_obsolete($realfile)) {
+                               WARN("OBSOLETE",
+                                    "$realfile is marked as 'obsolete' in the MAINTAINERS hierarchy.  No unnecessary modifications please.\n");
+                       }
                        if ($realfile =~ m@^(?:drivers/net/|net/|drivers/staging/)@) {
                                $check = 1;
                        } else {
@@ -2939,6 +3007,30 @@ sub process {
                             "Block comments use a trailing */ on a separate line\n" . $herecurr);
                }
 
+# Block comment * alignment
+               if ($prevline =~ /$;[ \t]*$/ &&                 #ends in comment
+                   $line =~ /^\+[ \t]*$;/ &&                   #leading comment
+                   $rawline =~ /^\+[ \t]*\*/ &&                #leading *
+                   (($prevrawline =~ /^\+.*?\/\*/ &&           #leading /*
+                     $prevrawline !~ /\*\/[ \t]*$/) ||         #no trailing */
+                    $prevrawline =~ /^\+[ \t]*\*/)) {          #leading *
+                       my $oldindent;
+                       $prevrawline =~ m@^\+([ \t]*/?)\*@;
+                       if (defined($1)) {
+                               $oldindent = expand_tabs($1);
+                       } else {
+                               $prevrawline =~ m@^\+(.*/?)\*@;
+                               $oldindent = expand_tabs($1);
+                       }
+                       $rawline =~ m@^\+([ \t]*)\*@;
+                       my $newindent = $1;
+                       $newindent = expand_tabs($newindent);
+                       if (length($oldindent) ne length($newindent)) {
+                               WARN("BLOCK_COMMENT_STYLE",
+                                    "Block comments should align the * on each line\n" . $hereprev);
+                       }
+               }
+
 # check for missing blank lines after struct/union declarations
 # with exceptions for various attributes and macros
                if ($prevline =~ /^[\+ ]};?\s*$/ &&
@@ -4665,7 +4757,17 @@ sub process {
                        $has_flow_statement = 1 if ($ctx =~ /\b(goto|return)\b/);
                        $has_arg_concat = 1 if ($ctx =~ /\#\#/ && $ctx !~ /\#\#\s*(?:__VA_ARGS__|args)\b/);
 
-                       $dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//;
+                       $dstat =~ s/^.\s*\#\s*define\s+$Ident(\([^\)]*\))?\s*//;
+                       my $define_args = $1;
+                       my $define_stmt = $dstat;
+                       my @def_args = ();
+
+                       if (defined $define_args && $define_args ne "") {
+                               $define_args = substr($define_args, 1, length($define_args) - 2);
+                               $define_args =~ s/\s*//g;
+                               @def_args = split(",", $define_args);
+                       }
+
                        $dstat =~ s/$;//g;
                        $dstat =~ s/\\\n.//g;
                        $dstat =~ s/^\s*//s;
@@ -4701,6 +4803,15 @@ sub process {
                                ^\[
                        }x;
                        #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
+
+                       $ctx =~ s/\n*$//;
+                       my $herectx = $here . "\n";
+                       my $stmt_cnt = statement_rawlines($ctx);
+
+                       for (my $n = 0; $n < $stmt_cnt; $n++) {
+                               $herectx .= raw_line($linenr, $n) . "\n";
+                       }
+
                        if ($dstat ne '' &&
                            $dstat !~ /^(?:$Ident|-?$Constant),$/ &&                    # 10, // foo(),
                            $dstat !~ /^(?:$Ident|-?$Constant);$/ &&                    # foo();
@@ -4716,13 +4827,6 @@ sub process {
                            $dstat !~ /^\(\{/ &&                                                # ({...
                            $ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/)
                        {
-                               $ctx =~ s/\n*$//;
-                               my $herectx = $here . "\n";
-                               my $cnt = statement_rawlines($ctx);
-
-                               for (my $n = 0; $n < $cnt; $n++) {
-                                       $herectx .= raw_line($linenr, $n) . "\n";
-                               }
 
                                if ($dstat =~ /;/) {
                                        ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
@@ -4731,6 +4835,46 @@ sub process {
                                        ERROR("COMPLEX_MACRO",
                                              "Macros with complex values should be enclosed in parentheses\n" . "$herectx");
                                }
+
+                       }
+
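By way of illustration (FROB and friends are invented for this note, not part of the patch), the two ERROR branches above trigger on macro shapes like these:

    /* MULTISTATEMENT_MACRO_USE_DO_WHILE: expands to two statements,
     * so "if (cond) FROB(x);" would only guard the first one. */
    #define FROB(x) frob_prepare(x); frob_commit(x)

    /* Safe form: a single statement in every context. */
    #define FROB_SAFE(x) do { frob_prepare(x); frob_commit(x); } while (0)

    /* COMPLEX_MACRO: an unparenthesized expression value, so
     * "2 * FROB_SUM(a, b)" expands to "2 * a + b". */
    #define FROB_SUM(a, b) a + b
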
+                       # Make $define_stmt single line, comment-free, etc
+                       my @stmt_array = split('\n', $define_stmt);
+                       my $first = 1;
+                       $define_stmt = "";
+                       foreach my $l (@stmt_array) {
+                               $l =~ s/\\$//;
+                               if ($first) {
+                                       $define_stmt = $l;
+                                       $first = 0;
+                               } elsif ($l =~ /^[\+ ]/) {
+                                       $define_stmt .= substr($l, 1);
+                               }
+                       }
+                       $define_stmt =~ s/$;//g;
+                       $define_stmt =~ s/\s+/ /g;
+                       $define_stmt = trim($define_stmt);
+
+# check if any macro arguments are reused (ignore '...' and 'type')
+                       foreach my $arg (@def_args) {
+                               next if ($arg =~ /\.\.\./);
+                               next if ($arg =~ /^type$/i);
+                               my $tmp = $define_stmt;
+                               $tmp =~ s/\b(typeof|__typeof__|__builtin\w+|typecheck\s*\(\s*$Type\s*,|\#+)\s*\(*\s*$arg\s*\)*\b//g;
+                               $tmp =~ s/\#+\s*$arg\b//g;
+                               $tmp =~ s/\b$arg\s*\#\#//g;
+                               my $use_cnt = $tmp =~ s/\b$arg\b//g;
+                               if ($use_cnt > 1) {
+                                       CHK("MACRO_ARG_REUSE",
+                                           "Macro argument reuse '$arg' - possible side-effects?\n" . "$herectx");
+                               }
+# check if any macro arguments may have other precedence issues
+                               if ($define_stmt =~ m/($Operators)?\s*\b$arg\b\s*($Operators)?/m &&
+                                   ((defined($1) && $1 ne ',') ||
+                                    (defined($2) && $2 ne ','))) {
+                                       CHK("MACRO_ARG_PRECEDENCE",
+                                           "Macro argument '$arg' may be better as '($arg)' to avoid precedence issues\n" . "$herectx");
+                               }
                        }
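The reuse and precedence tests above walk the flattened $define_stmt once per macro argument. A hedged sketch of what they report (the macros here are invented examples):

    /* MACRO_ARG_REUSE: 'a' and 'b' each appear twice, so
     * MAX_OF(i++, j) increments i twice - a hidden side effect. */
    #define MAX_OF(a, b) ((a) > (b) ? (a) : (b))

    /* MACRO_ARG_PRECEDENCE: 'x' sits next to '*' without its own
     * parentheses, so DOUBLE(1 + 2) expands to (1 + 2 * 2) == 5. */
    #define DOUBLE(x) (x * 2)
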
 
 # check for macros with flow control, but without ## concatenation
@@ -5495,46 +5639,46 @@ sub process {
                }
 
 # Check for memcpy(foo, bar, ETH_ALEN) that could be ether_addr_copy(foo, bar)
-               if ($^V && $^V ge 5.10.0 &&
-                   defined $stat &&
-                   $stat =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
-                       if (WARN("PREFER_ETHER_ADDR_COPY",
-                                "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . "$here\n$stat\n") &&
-                           $fix) {
-                               $fixed[$fixlinenr] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/;
-                       }
-               }
+#              if ($^V && $^V ge 5.10.0 &&
+#                  defined $stat &&
+#                  $stat =~ /^\+(?:.*?)\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+#                      if (WARN("PREFER_ETHER_ADDR_COPY",
+#                               "Prefer ether_addr_copy() over memcpy() if the Ethernet addresses are __aligned(2)\n" . "$here\n$stat\n") &&
+#                          $fix) {
+#                              $fixed[$fixlinenr] =~ s/\bmemcpy\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/ether_addr_copy($2, $7)/;
+#                      }
+#              }
 
 # Check for memcmp(foo, bar, ETH_ALEN) that could be ether_addr_equal*(foo, bar)
-               if ($^V && $^V ge 5.10.0 &&
-                   defined $stat &&
-                   $stat =~ /^\+(?:.*?)\bmemcmp\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
-                       WARN("PREFER_ETHER_ADDR_EQUAL",
-                            "Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp()\n" . "$here\n$stat\n")
-               }
+#              if ($^V && $^V ge 5.10.0 &&
+#                  defined $stat &&
+#                  $stat =~ /^\+(?:.*?)\bmemcmp\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+#                      WARN("PREFER_ETHER_ADDR_EQUAL",
+#                           "Prefer ether_addr_equal() or ether_addr_equal_unaligned() over memcmp()\n" . "$here\n$stat\n")
+#              }
 
 # check for memset(foo, 0x0, ETH_ALEN) that could be eth_zero_addr
 # check for memset(foo, 0xFF, ETH_ALEN) that could be eth_broadcast_addr
-               if ($^V && $^V ge 5.10.0 &&
-                   defined $stat &&
-                   $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
-
-                       my $ms_val = $7;
-
-                       if ($ms_val =~ /^(?:0x|)0+$/i) {
-                               if (WARN("PREFER_ETH_ZERO_ADDR",
-                                        "Prefer eth_zero_addr over memset()\n" . "$here\n$stat\n") &&
-                                   $fix) {
-                                       $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_zero_addr($2)/;
-                               }
-                       } elsif ($ms_val =~ /^(?:0xff|255)$/i) {
-                               if (WARN("PREFER_ETH_BROADCAST_ADDR",
-                                        "Prefer eth_broadcast_addr() over memset()\n" . "$here\n$stat\n") &&
-                                   $fix) {
-                                       $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_broadcast_addr($2)/;
-                               }
-                       }
-               }
+#              if ($^V && $^V ge 5.10.0 &&
+#                  defined $stat &&
+#                  $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*ETH_ALEN\s*\)/) {
+#
+#                      my $ms_val = $7;
+#
+#                      if ($ms_val =~ /^(?:0x|)0+$/i) {
+#                              if (WARN("PREFER_ETH_ZERO_ADDR",
+#                                       "Prefer eth_zero_addr over memset()\n" . "$here\n$stat\n") &&
+#                                  $fix) {
+#                                      $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_zero_addr($2)/;
+#                              }
+#                      } elsif ($ms_val =~ /^(?:0xff|255)$/i) {
+#                              if (WARN("PREFER_ETH_BROADCAST_ADDR",
+#                                       "Prefer eth_broadcast_addr() over memset()\n" . "$here\n$stat\n") &&
+#                                  $fix) {
+#                                      $fixed[$fixlinenr] =~ s/\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*,\s*ETH_ALEN\s*\)/eth_broadcast_addr($2)/;
+#                              }
+#                      }
+#              }
 
 # typecasts on min/max could be min_t/max_t
                if ($^V && $^V ge 5.10.0 &&
@@ -5654,6 +5798,19 @@ sub process {
                             "externs should be avoided in .c files\n" .  $herecurr);
                }
 
+               if ($realfile =~ /\.[ch]$/ && defined $stat &&
+                   $stat =~ /^.\s*(?:extern\s+)?$Type\s*$Ident\s*\(\s*([^{]+)\s*\)\s*;/s &&
+                   $1 ne "void") {
+                       my $args = trim($1);
+                       while ($args =~ m/\s*($Type\s*(?:$Ident|\(\s*\*\s*$Ident?\s*\)\s*$balanced_parens)?)/g) {
+                               my $arg = trim($1);
+                               if ($arg =~ /^$Type$/ && $arg !~ /enum\s+$Ident$/) {
+                                       WARN("FUNCTION_ARGUMENTS",
+                                            "function definition argument '$arg' should also have an identifier name\n" . $herecurr);
+                               }
+                       }
+               }
+
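A brief sketch of what the new FUNCTION_ARGUMENTS warning flags (the prototype is hypothetical):

    /* Warned: both parameters are bare types with no identifier. */
    int frob_device(struct device *, unsigned long);

    /* Preferred: every argument carries a descriptive name. */
    int frob_device(struct device *dev, unsigned long flags);
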
 # checks for new __setup's
                if ($rawline =~ /\b__setup\("([^"]*)"/) {
                        my $name = $1;
@@ -5853,46 +6010,6 @@ sub process {
                }
 
 # check for various structs that are normally const (ops, kgdb, device_tree)
-               my $const_structs = qr{
-                               acpi_dock_ops|
-                               address_space_operations|
-                               backlight_ops|
-                               block_device_operations|
-                               dentry_operations|
-                               dev_pm_ops|
-                               dma_map_ops|
-                               extent_io_ops|
-                               file_lock_operations|
-                               file_operations|
-                               hv_ops|
-                               ide_dma_ops|
-                               intel_dvo_dev_ops|
-                               item_operations|
-                               iwl_ops|
-                               kgdb_arch|
-                               kgdb_io|
-                               kset_uevent_ops|
-                               lock_manager_operations|
-                               microcode_ops|
-                               mtrr_ops|
-                               neigh_ops|
-                               nlmsvc_binding|
-                               of_device_id|
-                               pci_raw_ops|
-                               pipe_buf_operations|
-                               platform_hibernation_ops|
-                               platform_suspend_ops|
-                               proto_ops|
-                               rpc_pipe_ops|
-                               seq_operations|
-                               snd_ac97_build_ops|
-                               soc_pcmcia_socket_ops|
-                               stacktrace_ops|
-                               sysfs_ops|
-                               tty_operations|
-                               uart_ops|
-                               usb_mon_operations|
-                               wd_ops}x;
                if ($line !~ /\bconst\b/ &&
                    $line =~ /\bstruct\s+($const_structs)\b/) {
                        WARN("CONST_STRUCT",
@@ -5979,34 +6096,69 @@ sub process {
 # Mode permission misuses where it seems decimal should be octal
 # This uses a shortcut match to avoid unnecessary uses of a slow foreach loop
                if ($^V && $^V ge 5.10.0 &&
+                   defined $stat &&
                    $line =~ /$mode_perms_search/) {
                        foreach my $entry (@mode_permission_funcs) {
                                my $func = $entry->[0];
                                my $arg_pos = $entry->[1];
 
+                               my $lc = $stat =~ tr@\n@@;
+                               $lc = $lc + $linenr;
+                               my $stat_real = raw_line($linenr, 0);
+                               for (my $count = $linenr + 1; $count <= $lc; $count++) {
+                                       $stat_real = $stat_real . "\n" . raw_line($count, 0);
+                               }
+
                                my $skip_args = "";
                                if ($arg_pos > 1) {
                                        $arg_pos--;
                                        $skip_args = "(?:\\s*$FuncArg\\s*,\\s*){$arg_pos,$arg_pos}";
                                }
-                               my $test = "\\b$func\\s*\\(${skip_args}([\\d]+)\\s*[,\\)]";
-                               if ($line =~ /$test/) {
+                               my $test = "\\b$func\\s*\\(${skip_args}($FuncArg(?:\\|\\s*$FuncArg)*)\\s*[,\\)]";
+                               if ($stat =~ /$test/) {
                                        my $val = $1;
                                        $val = $6 if ($skip_args ne "");
-
-                                       if ($val !~ /^0$/ &&
-                                           (($val =~ /^$Int$/ && $val !~ /^$Octal$/) ||
-                                            length($val) ne 4)) {
+                                       if (($val =~ /^$Int$/ && $val !~ /^$Octal$/) ||
+                                           ($val =~ /^$Octal$/ && length($val) ne 4)) {
                                                ERROR("NON_OCTAL_PERMISSIONS",
-                                                     "Use 4 digit octal (0777) not decimal permissions\n" . $herecurr);
-                                       } elsif ($val =~ /^$Octal$/ && (oct($val) & 02)) {
+                                                     "Use 4 digit octal (0777) not decimal permissions\n" . "$here\n" . $stat_real);
+                                       }
+                                       if ($val =~ /^$Octal$/ && (oct($val) & 02)) {
                                                ERROR("EXPORTED_WORLD_WRITABLE",
-                                                     "Exporting writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
+                                                     "Exporting writable files is usually an error. Consider more restrictive permissions.\n" . "$here\n" . $stat_real);
                                        }
                                }
                        }
                }
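Because the test now matches against the whole statement in $stat rather than a single $line, calls split across lines are caught as well. An illustrative (hypothetical) hit for each ERROR:

    /* NON_OCTAL_PERMISSIONS: 644 here is decimal, not the
     * octal 0644 the author intended. */
    module_param(debug, int,
                 644);

    /* EXPORTED_WORLD_WRITABLE: 0666 sets the S_IWOTH bit (02),
     * so the parameter file becomes world-writable. */
    module_param(verbose, int, 0666);
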
 
+# check for uses of S_<PERMS> that could be octal for readability
+               if ($line =~ /\b$mode_perms_string_search\b/) {
+                       my $val = "";
+                       my $oval = "";
+                       my $to = 0;
+                       my $curpos = 0;
+                       my $lastpos = 0;
+                       while ($line =~ /\b(($mode_perms_string_search)\b(?:\s*\|\s*)?\s*)/g) {
+                               $curpos = pos($line);
+                               my $match = $2;
+                               my $omatch = $1;
+                               last if ($lastpos > 0 && ($curpos - length($omatch) != $lastpos));
+                               $lastpos = $curpos;
+                               $to |= $mode_permission_string_types{$match};
+                               $val .= '\s*\|\s*' if ($val ne "");
+                               $val .= $match;
+                               $oval .= $omatch;
+                       }
+                       $oval =~ s/^\s*\|\s*//;
+                       $oval =~ s/\s*\|\s*$//;
+                       my $octal = sprintf("%04o", $to);
+                       if (WARN("SYMBOLIC_PERMS",
+                                "Symbolic permissions '$oval' are not preferred. Consider using octal permissions '$octal'.\n" . $herecurr) &&
+                           $fix) {
+                               $fixed[$fixlinenr] =~ s/$val/$octal/;
+                       }
+               }
+
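A before/after sketch of the new SYMBOLIC_PERMS warning and its --fix rewrite (the attribute is invented): S_IRUGO is S_IRUSR|S_IRGRP|S_IROTH (0444) and S_IWUSR is 0200, so the combination maps to 0644.

    /* Before: symbolic mode bits. */
    static DEVICE_ATTR(bar, S_IRUGO | S_IWUSR, bar_show, bar_store);

    /* After --fix: the equivalent four-digit octal constant. */
    static DEVICE_ATTR(bar, 0644, bar_show, bar_store);
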
 # validate content of MODULE_LICENSE against list from include/linux/module.h
                if ($line =~ /\bMODULE_LICENSE\s*\(\s*($String)\s*\)/) {
                        my $extracted_string = get_quoted_string($line, $rawline);
diff --git a/scripts/const_structs.checkpatch b/scripts/const_structs.checkpatch
new file mode 100644 (file)
index 0000000..ac5f126
--- /dev/null
@@ -0,0 +1,64 @@
+acpi_dock_ops
+address_space_operations
+backlight_ops
+block_device_operations
+clk_ops
+comedi_lrange
+component_ops
+dentry_operations
+dev_pm_ops
+dma_map_ops
+driver_info
+drm_connector_funcs
+drm_encoder_funcs
+drm_encoder_helper_funcs
+ethtool_ops
+extent_io_ops
+file_lock_operations
+file_operations
+hv_ops
+ide_dma_ops
+ide_port_ops
+inode_operations
+intel_dvo_dev_ops
+irq_domain_ops
+item_operations
+iwl_cfg
+iwl_ops
+kgdb_arch
+kgdb_io
+kset_uevent_ops
+lock_manager_operations
+machine_desc
+microcode_ops
+mlxsw_reg_info
+mtrr_ops
+neigh_ops
+net_device_ops
+nlmsvc_binding
+nvkm_device_chip
+of_device_id
+pci_raw_ops
+pipe_buf_operations
+platform_hibernation_ops
+platform_suspend_ops
+proto_ops
+regmap_access_table
+rpc_pipe_ops
+rtc_class_ops
+sd_desc
+seq_operations
+sirfsoc_padmux
+snd_ac97_build_ops
+snd_soc_component_driver
+soc_pcmcia_socket_ops
+stacktrace_ops
+sysfs_ops
+tty_operations
+uart_ops
+usb_mon_operations
+v4l2_ctrl_ops
+v4l2_ioctl_ops
+vm_operations_struct
+wacom_features
+wd_ops
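With the list moved into scripts/const_structs.checkpatch (one struct name per line; '#' starts a comment and blank lines are skipped, per the loader at the top of this patch), new entries no longer require editing checkpatch.pl itself. For illustration, a fragment the CONST_STRUCT test would warn about (driver names invented):

    #include <linux/fs.h>
    #include <linux/module.h>

    static int foo_open(struct inode *inode, struct file *file)
    {
            return 0;
    }

    /* Warned: file_operations is listed in const_structs.checkpatch
     * but this definition is not const. Declaring it
     * "static const struct file_operations" silences the warning. */
    static struct file_operations foo_fops = {
            .owner = THIS_MODULE,
            .open  = foo_open,
    };
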
index b3775a9604eac9ff7d7e4b667003969408d3d491..a2ff3388e5ea396f478870b47e79faadb73cca02 100755 (executable)
@@ -263,7 +263,8 @@ exuberant()
        -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL   \
        -I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
        -I static,const                                         \
-       --extra=+f --c-kinds=+px --langmap=c:+.h "${regex[@]}"
+       --extra=+fq --c-kinds=+px --fields=+iaS --langmap=c:+.h \
+       "${regex[@]}"
 
        setup_regex exuberant kconfig
        all_kconfigs | xargs $1 -a                              \
index c8455b47388bcd901290e6a70ec178d4e48aa11a..7ab14ce65a73bff98d42d51f4479d280aee72a17 100644 (file)
@@ -338,7 +338,7 @@ static irqreturn_t sst_byt_irq_thread(int irq, void *context)
        spin_unlock_irqrestore(&sst->spinlock, flags);
 
        /* continue to send any remaining messages... */
-       queue_kthread_work(&ipc->kworker, &ipc->kwork);
+       kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
        return IRQ_HANDLED;
 }
index 5d2949324d0ee68969b8102a2eb8da22b65813da..012742299dd532407b4d4859b89a74d09b262d0b 100644 (file)
@@ -12,7 +12,6 @@
  *
  */
 
-#include <linux/kconfig.h>
 #include <linux/stddef.h>
 #include <linux/acpi.h>
 
index a12c7bb08d3b88b891e30cf2d920b39929028c45..6c672ac79cce7b3b12e1a7cb65fdb8e8c4456f15 100644 (file)
@@ -111,7 +111,7 @@ static int ipc_tx_message(struct sst_generic_ipc *ipc, u64 header,
        list_add_tail(&msg->list, &ipc->tx_list);
        spin_unlock_irqrestore(&ipc->dsp->spinlock, flags);
 
-       queue_kthread_work(&ipc->kworker, &ipc->kwork);
+       kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
        if (wait)
                return tx_wait_done(ipc, msg, rx_data);
@@ -281,7 +281,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc)
                return -ENOMEM;
 
        /* start the IPC message thread */
-       init_kthread_worker(&ipc->kworker);
+       kthread_init_worker(&ipc->kworker);
        ipc->tx_thread = kthread_run(kthread_worker_fn,
                                        &ipc->kworker, "%s",
                                        dev_name(ipc->dev));
@@ -292,7 +292,7 @@ int sst_ipc_init(struct sst_generic_ipc *ipc)
                return ret;
        }
 
-       init_kthread_work(&ipc->kwork, ipc_tx_msgs);
+       kthread_init_work(&ipc->kwork, ipc_tx_msgs);
        return 0;
 }
 EXPORT_SYMBOL_GPL(sst_ipc_init);
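These sound/soc hunks are the mechanical half of the kthread worker API rename called out in the merge log: init_kthread_worker() becomes kthread_init_worker(), init_kthread_work() becomes kthread_init_work(), and queue_kthread_work() becomes kthread_queue_work(). A minimal sketch of the renamed API (worker, work item, and function names invented):

    #include <linux/err.h>
    #include <linux/kthread.h>

    static struct kthread_worker my_worker;
    static struct kthread_work my_work;

    static void my_work_fn(struct kthread_work *work)
    {
            /* runs in the worker's dedicated kthread */
    }

    static int my_setup(void)
    {
            struct task_struct *task;

            kthread_init_worker(&my_worker);      /* was init_kthread_worker() */
            task = kthread_run(kthread_worker_fn, &my_worker, "my-worker");
            if (IS_ERR(task))
                    return PTR_ERR(task);

            kthread_init_work(&my_work, my_work_fn);  /* was init_kthread_work() */
            kthread_queue_work(&my_worker, &my_work); /* was queue_kthread_work() */
            return 0;
    }
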
index 91565229d07422414418dc7495a3f655f321e7a1..e432a31fd9f2cbcd7a4f126bc32c048e2ba0a4ee 100644 (file)
@@ -818,7 +818,7 @@ static irqreturn_t hsw_irq_thread(int irq, void *context)
        spin_unlock_irqrestore(&sst->spinlock, flags);
 
        /* continue to send any remaining messages... */
-       queue_kthread_work(&ipc->kworker, &ipc->kwork);
+       kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
        return IRQ_HANDLED;
 }
index 0bd01e62622cc80a6488c586306b989aa0eae46e..797cf40532352c22a1adc573dd1aa1db292e726e 100644 (file)
@@ -464,7 +464,7 @@ irqreturn_t skl_dsp_irq_thread_handler(int irq, void *context)
        skl_ipc_int_enable(dsp);
 
        /* continue to send any remaining messages... */
-       queue_kthread_work(&ipc->kworker, &ipc->kwork);
+       kthread_queue_work(&ipc->kworker, &ipc->kwork);
 
        return IRQ_HANDLED;
 }
index 878daf3429e8f822593aff99c490550e5d76ef93..7dc5a0af9b544c836223a10bfbb2330e9063ec17 100644 (file)
@@ -1,4 +1,3 @@
-#include <linux/kconfig.h>
 #include <linux/bug.h>
 
 void check(void)
index 9d0919ed52a4088d237ba47aa00219e1e1dee47b..f2e07f2fd4b48282a274aae7c667a25116ab4505 100644 (file)
@@ -3,7 +3,8 @@ CFLAGS += -I. -g -O2 -Wall -D_LGPL_SOURCE
 LDFLAGS += -lpthread -lurcu
 TARGETS = main
 OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
-        regression1.o regression2.o regression3.o multiorder.o
+        regression1.o regression2.o regression3.o multiorder.o \
+        iteration_check.o
 
 targets: $(TARGETS)
 
diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c
new file mode 100644 (file)
index 0000000..9adb8e7
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * iteration_check.c: test races having to do with radix tree iteration
+ * Copyright (c) 2016 Intel Corporation
+ * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/radix-tree.h>
+#include <pthread.h>
+#include "test.h"
+
+#define NUM_THREADS 4
+#define TAG 0
+static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_t threads[NUM_THREADS];
+RADIX_TREE(tree, GFP_KERNEL);
+bool test_complete;
+
+/* relentlessly fill the tree with tagged entries */
+static void *add_entries_fn(void *arg)
+{
+       int pgoff;
+
+       while (!test_complete) {
+               for (pgoff = 0; pgoff < 100; pgoff++) {
+                       pthread_mutex_lock(&tree_lock);
+                       if (item_insert(&tree, pgoff) == 0)
+                               item_tag_set(&tree, pgoff, TAG);
+                       pthread_mutex_unlock(&tree_lock);
+               }
+       }
+
+       return NULL;
+}
+
+/*
+ * Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find
+ * things that have been removed and randomly resetting our iteration to the
+ * next chunk with radix_tree_iter_next().  Both radix_tree_iter_retry() and
+ * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a
+ * NULL 'slot' variable.
+ */
+static void *tagged_iteration_fn(void *arg)
+{
+       struct radix_tree_iter iter;
+       void **slot;
+
+       while (!test_complete) {
+               rcu_read_lock();
+               radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) {
+                       void *entry;
+                       int i;
+
+                       /* busy wait to let removals happen */
+                       for (i = 0; i < 1000000; i++)
+                               ;
+
+                       entry = radix_tree_deref_slot(slot);
+                       if (unlikely(!entry))
+                               continue;
+
+                       if (radix_tree_deref_retry(entry)) {
+                               slot = radix_tree_iter_retry(&iter);
+                               continue;
+                       }
+
+                       if (rand() % 50 == 0)
+                               slot = radix_tree_iter_next(&iter);
+               }
+               rcu_read_unlock();
+       }
+
+       return NULL;
+}
+
+/*
+ * Iterate over the entries, doing a radix_tree_iter_retry() as we find things
+ * that have been removed and randomly resetting our iteration to the next
+ * chunk with radix_tree_iter_next().  Both radix_tree_iter_retry() and
+ * radix_tree_iter_next() cause radix_tree_next_slot() to be called with a
+ * NULL 'slot' variable.
+ */
+static void *untagged_iteration_fn(void *arg)
+{
+       struct radix_tree_iter iter;
+       void **slot;
+
+       while (!test_complete) {
+               rcu_read_lock();
+               radix_tree_for_each_slot(slot, &tree, &iter, 0) {
+                       void *entry;
+                       int i;
+
+                       /* busy wait to let removals happen */
+                       for (i = 0; i < 1000000; i++)
+                               ;
+
+                       entry = radix_tree_deref_slot(slot);
+                       if (unlikely(!entry))
+                               continue;
+
+                       if (radix_tree_deref_retry(entry)) {
+                               slot = radix_tree_iter_retry(&iter);
+                               continue;
+                       }
+
+                       if (rand() % 50 == 0)
+                               slot = radix_tree_iter_next(&iter);
+               }
+               rcu_read_unlock();
+       }
+
+       return NULL;
+}
+
+/*
+ * Randomly remove entries to help induce radix_tree_iter_retry() calls in the
+ * two iteration functions.
+ */
+static void *remove_entries_fn(void *arg)
+{
+       while (!test_complete) {
+               int pgoff;
+
+               pgoff = rand() % 100;
+
+               pthread_mutex_lock(&tree_lock);
+               item_delete(&tree, pgoff);
+               pthread_mutex_unlock(&tree_lock);
+       }
+
+       return NULL;
+}
+
+/* This is a unit test for a bug found by the syzkaller tester */
+void iteration_test(void)
+{
+       int i;
+
+       printf("Running iteration tests for 10 seconds\n");
+
+       srand(time(0));
+       test_complete = false;
+
+       if (pthread_create(&threads[0], NULL, tagged_iteration_fn, NULL)) {
+               perror("pthread_create");
+               exit(1);
+       }
+       if (pthread_create(&threads[1], NULL, untagged_iteration_fn, NULL)) {
+               perror("pthread_create");
+               exit(1);
+       }
+       if (pthread_create(&threads[2], NULL, add_entries_fn, NULL)) {
+               perror("pthread_create");
+               exit(1);
+       }
+       if (pthread_create(&threads[3], NULL, remove_entries_fn, NULL)) {
+               perror("pthread_create");
+               exit(1);
+       }
+
+       sleep(10);
+       test_complete = true;
+
+       for (i = 0; i < NUM_THREADS; i++) {
+               if (pthread_join(threads[i], NULL)) {
+                       perror("pthread_join");
+                       exit(1);
+               }
+       }
+
+       item_kill_tree(&tree);
+}
index b7619ff3b552ba9d6f206a98b4b21731403a83cc..daa9010693e8374148a57481285f7fbc457a1356 100644 (file)
@@ -332,6 +332,7 @@ int main(int argc, char **argv)
        regression1_test();
        regression2_test();
        regression3_test();
+       iteration_test();
        single_thread_tests(long_run);
 
        sleep(1);
index 2d03a63bb79c6777c144296fb18e4e4ebcbd6ab3..0d6813a61b37f904b436e74448c910a350c10bb3 100644 (file)
@@ -43,7 +43,7 @@
 #include "regression.h"
 
 static RADIX_TREE(mt_tree, GFP_KERNEL);
-static pthread_mutex_t mt_lock;
+static pthread_mutex_t mt_lock = PTHREAD_MUTEX_INITIALIZER;
 
 struct page {
        pthread_mutex_t lock;
index e85131369723c971dbd5f7f198c389b4cd75fc3a..217fb2403f0901c13d55936fa7cfe0c6c9da4157 100644 (file)
@@ -27,6 +27,7 @@ void item_kill_tree(struct radix_tree_root *root);
 
 void tag_check(void);
 void multiorder_checks(void);
+void iteration_test(void);
 
 struct item *
 item_tag_set(struct radix_tree_root *root, unsigned long index, int tag);