git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'for-4.4/lightnvm' into for-next
author Jens Axboe <axboe@fb.com>
Thu, 29 Oct 2015 19:37:37 +0000 (04:37 +0900)
committer Jens Axboe <axboe@fb.com>
Thu, 29 Oct 2015 19:37:37 +0000 (04:37 +0900)
86 files changed:
Documentation/ABI/testing/sysfs-block
Documentation/block/pr.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/sh-msiof.txt
arch/alpha/include/asm/word-at-a-time.h
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/insn.c
arch/arm64/kernel/setup.c
arch/arm64/mm/fault.c
arch/h8300/include/asm/Kbuild
arch/powerpc/include/asm/Kbuild
arch/s390/boot/compressed/Makefile
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/include/asm/numa.h
arch/s390/include/asm/topology.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/entry.S
arch/s390/kernel/vtime.c
arch/s390/numa/mode_emu.c
arch/s390/numa/numa.c
arch/tile/include/asm/Kbuild
arch/tile/include/asm/word-at-a-time.h
arch/x86/include/asm/xen/hypercall.h
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
block/bio-integrity.c
block/blk-core.c
block/blk-integrity.c
block/blk-merge.c
block/blk-mq-sysfs.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
block/blk.h
block/elevator.c
block/genhd.c
block/ioctl.c
block/partition-generic.c
block/t10-pi.c
drivers/base/regmap/regmap-debugfs.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/mtd/nand/mxc_nand.c
drivers/mtd/nand/sunxi_nand.c
drivers/nvdimm/btt.c
drivers/nvdimm/core.c
drivers/nvme/host/pci.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/core.c
drivers/scsi/sd.c
drivers/scsi/sd_dif.c
drivers/spi/spi-davinci.c
drivers/target/target_core_iblock.c
drivers/video/fbdev/broadsheetfb.c
drivers/video/fbdev/fsl-diu-fb.c
drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
drivers/video/fbdev/omap2/displays-new/connector-dvi.c
drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
drivers/video/fbdev/tridentfb.c
drivers/video/of_display_timing.c
fs/block_dev.c
fs/cifs/cifsfs.h
fs/cifs/inode.c
fs/cifs/smb2pdu.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4trace.h
fs/nfs/write.c
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/genhd.h
include/linux/pr.h [new file with mode: 0644]
include/linux/t10-pi.h
include/uapi/linux/pr.h [new file with mode: 0644]
include/xen/interface/sched.h
lib/string.c
mm/filemap.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c

index 8df003963d99c0500eb9a1c3ee20be367812eb36..71d184dbb70d29daabf9ad1461d12615c7d34fbf 100644 (file)
@@ -60,6 +60,13 @@ Description:
                Indicates whether a storage device is capable of storing
                integrity metadata. Set if the device is T10 PI-capable.
 
+What:          /sys/block/<disk>/integrity/protection_interval_bytes
+Date:          July 2015
+Contact:       Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+               Describes the number of data bytes which are protected
+               by one integrity tuple. Typically the device's logical
+               block size.
 
 What:          /sys/block/<disk>/integrity/write_generate
 Date:          June 2008
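
The attribute reads back as a plain decimal byte count. A minimal user
space sketch (the disk name is illustrative; per the description above
the value is typically the logical block size):

    #include <stdio.h>

    int main(void)
    {
            unsigned int interval;
            FILE *f = fopen("/sys/block/sda/integrity/protection_interval_bytes", "r");

            if (!f)
                    return 1;
            /* 512 or 4096 would be typical for a T10 PI-capable disk */
            if (fscanf(f, "%u", &interval) == 1)
                    printf("protection interval: %u bytes\n", interval);
            fclose(f);
            return 0;
    }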
diff --git a/Documentation/block/pr.txt b/Documentation/block/pr.txt
new file mode 100644 (file)
index 0000000..d3eb1ca
--- /dev/null
@@ -0,0 +1,119 @@
+
+Block layer support for Persistent Reservations
+===============================================
+
+The Linux kernel supports a user space interface for simplified
+Persistent Reservations, which map to block devices that support
+them (such as SCSI). Persistent Reservations allow restricting
+access to block devices to specific initiators in a shared storage
+setup.
+
+This document gives a general overview of the supported ioctl commands.
+For a more detailed reference please refer to the SCSI Primary
+Commands standard, specifically the section on Reservations and the
+"PERSISTENT RESERVE IN" and "PERSISTENT RESERVE OUT" commands.
+
+All implementations are expected to ensure the reservations survive
+a power loss and cover all connections in a multipath environment.
+These behaviors are optional in SPC but will be automatically applied
+by Linux.
+
+
+The following types of reservations are supported:
+--------------------------------------------------
+
+ - PR_WRITE_EXCLUSIVE
+
+       Only the initiator that owns the reservation can write to the
+       device.  Any initiator can read from the device.
+
+ - PR_EXCLUSIVE_ACCESS
+
+       Only the initiator that owns the reservation can access the
+       device.
+
+ - PR_WRITE_EXCLUSIVE_REG_ONLY
+
+       Only initiators with a registered key can write to the device.
+       Any initiator can read from the device.
+
+ - PR_EXCLUSIVE_ACCESS_REG_ONLY
+
+       Only initiators with a registered key can access the device.
+
+ - PR_WRITE_EXCLUSIVE_ALL_REGS
+
+       Only initiators with a registered key can write to the device.
+       Any initiator can read from the device.
+       All initiators with a registered key are considered reservation
+       holders.
+       Please refer to the SPC spec on the meaning of a reservation
+       holder if you want to use this type.
+
+ - PR_EXCLUSIVE_ACCESS_ALL_REGS
+
+       Only initiators with a registered key can access the device.
+       All initiators with a registered key are considered reservation
+       holders.
+       Please refer to the SPC spec on the meaning of a reservation
+       holder if you want to use this type.
+
+
+The following ioctls are supported:
+-----------------------------------
+
+1. IOC_PR_REGISTER
+
+This ioctl command registers a new reservation if the new_key argument
+is non-null.  If no reservation exists, old_key must be zero; if an
+existing reservation should be replaced, old_key must contain the old
+reservation key.
+
+If the new_key argument is 0, it unregisters the existing reservation passed
+in old_key.
+
+
+2. IOC_PR_RESERVE
+
+This ioctl command reserves the device and thus restricts access for other
+initiators based on the type argument.
+reservation key for the device as acquired by the IOC_PR_REGISTER,
+IOC_PR_REGISTER_IGNORE, IOC_PR_PREEMPT or IOC_PR_PREEMPT_ABORT commands.
+
+
+3. IOC_PR_RELEASE
+
+This ioctl command releases the reservation specified by key and flags
+and thus removes any access restriction implied by it.
+
+
+4. IOC_PR_PREEMPT
+
+This ioctl command releases the existing reservation referred to by
+old_key and replaces it with a new reservation of the specified type
+for the reservation key new_key.
+
+
+5. IOC_PR_PREEMPT_ABORT
+
+This ioctl command works like IOC_PR_PREEMPT except that it also aborts
+any outstanding command sent over a connection identified by old_key.
+
+6. IOC_PR_CLEAR
+
+This ioctl command unregisters both the key passed in and any other
+reservation keys registered with the device, and drops any existing reservation.
+
+
+Flags
+-----
+
+All the ioctls have a flags field.  Currently only one flag is supported:
+
+ - PR_FL_IGNORE_KEY
+
+       Ignore the existing reservation key.  This is commonly supported for
+       IOC_PR_REGISTER, and some implementations may support the flag for
+       IOC_PR_RESERVE.
+
+For all unknown flags the kernel will return -EOPNOTSUPP.
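
These commands map onto the pr_* structures and ioctl numbers added in
include/uapi/linux/pr.h later in this series. A minimal user space
sketch, assuming that header: register a key, then take a
write-exclusive reservation (the device path and key value are
placeholders, and error handling is reduced to the return code):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/pr.h>

    int pr_take_write_exclusive(const char *dev)
    {
            struct pr_registration reg = { .old_key = 0, .new_key = 0xabc123 };
            struct pr_reservation  rsv = { .key = 0xabc123,
                                           .type = PR_WRITE_EXCLUSIVE };
            int ret, fd = open(dev, O_RDWR);

            if (fd < 0)
                    return -1;
            /* no prior registration, so old_key stays zero */
            ret = ioctl(fd, IOC_PR_REGISTER, &reg);
            if (!ret)
                    /* the key must match the registration above */
                    ret = ioctl(fd, IOC_PR_RESERVE, &rsv);
            close(fd);
            return ret;
    }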
index 8f771441be60556ace93f2b29d87df856882c344..705075da2f10156e92a60828177c8483ee16eeec 100644 (file)
@@ -51,7 +51,7 @@ Optional properties, deprecated for soctype-specific bindings:
 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words
                         (default is 64)
 - renesas,rx-fifo-size : Overrides the default rx fifo size given in words
-                        (default is 64, or 256 on R-Car Gen2)
+                        (default is 64)
 
 Pinctrl properties might be needed, too.  See
 Documentation/devicetree/bindings/pinctrl/renesas,*.
index 6b340d0f1521c3ad9c4edf984abe60982ae24c04..902e6ab00a066fead53614ed86cb88980aea2fba 100644 (file)
@@ -52,4 +52,6 @@ static inline unsigned long find_zero(unsigned long bits)
 #endif
 }
 
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
index cebf78661a553775003bfee8ec89f65e33e3ec55..253021ef2769078e69793288a8cc067aebb76d34 100644 (file)
@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook)
 }
 
 /*
- * Call registered single step handers
+ * Call registered single step handlers
  * There is no Syndrome info to check for determining the handler.
  * So we call all the registered handlers, until the right handler is
  * found which returns zero.
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
  * Use reader/writer locks instead of plain spinlock.
  */
 static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
 
 void register_break_hook(struct break_hook *hook)
 {
-       write_lock(&break_hook_lock);
-       list_add(&hook->node, &break_hook);
-       write_unlock(&break_hook_lock);
+       spin_lock(&break_hook_lock);
+       list_add_rcu(&hook->node, &break_hook);
+       spin_unlock(&break_hook_lock);
 }
 
 void unregister_break_hook(struct break_hook *hook)
 {
-       write_lock(&break_hook_lock);
-       list_del(&hook->node);
-       write_unlock(&break_hook_lock);
+       spin_lock(&break_hook_lock);
+       list_del_rcu(&hook->node);
+       spin_unlock(&break_hook_lock);
+       synchronize_rcu();
 }
 
 static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
        struct break_hook *hook;
        int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
 
-       read_lock(&break_hook_lock);
-       list_for_each_entry(hook, &break_hook, node)
+       rcu_read_lock();
+       list_for_each_entry_rcu(hook, &break_hook, node)
                if ((esr & hook->esr_mask) == hook->esr_val)
                        fn = hook->fn;
-       read_unlock(&break_hook_lock);
+       rcu_read_unlock();
 
        return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
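
The hunk above is the stock conversion from a reader/writer lock to an
RCU-protected list: the read side (call_break_hook, which runs in debug
exception context) becomes lock-free, while register/unregister still
serialize against each other through a spinlock and use
synchronize_rcu() so an unlinked hook can no longer be observed by a
reader before it is reused or freed. A generic sketch of the same
pattern (names are illustrative, not from this patch):

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct hook {
            struct list_head node;
            int (*fn)(void);
    };

    static LIST_HEAD(hook_list);
    static DEFINE_SPINLOCK(hook_lock);      /* serializes writers only */

    void hook_unregister(struct hook *h)
    {
            spin_lock(&hook_lock);
            list_del_rcu(&h->node);
            spin_unlock(&hook_lock);
            synchronize_rcu();      /* wait for readers still seeing @h */
    }

    int hook_call_all(void)
    {
            struct hook *h;
            int ret = 0;

            rcu_read_lock();        /* cheap and safe in exception context */
            list_for_each_entry_rcu(h, &hook_list, node)
                    ret = h->fn();
            rcu_read_unlock();
            return ret;
    }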
index f341866aa810340e47aa9b7283b6b472ca2eae84..c08b9ad6f42931e8766d0186daa51a6cce8dbe39 100644 (file)
@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn)
                aarch64_insn_is_bcond(insn));
 }
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap)
 {
@@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
        unsigned long flags = 0;
        int ret;
 
-       spin_lock_irqsave(&patch_lock, flags);
+       raw_spin_lock_irqsave(&patch_lock, flags);
        waddr = patch_map(addr, FIX_TEXT_POKE0);
 
        ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
 
        patch_unmap(FIX_TEXT_POKE0);
-       spin_unlock_irqrestore(&patch_lock, flags);
+       raw_spin_unlock_irqrestore(&patch_lock, flags);
 
        return ret;
 }
index 6bab21f84a9ff38402e70345016ed50ae8e95e30..232247945b1c215c25fbfd708573fe3def5c68c5 100644 (file)
@@ -364,6 +364,8 @@ static void __init relocate_initrd(void)
                to_free = ram_end - orig_start;
 
        size = orig_end - orig_start;
+       if (!size)
+               return;
 
        /* initrd needs to be relocated completely inside linear mapping */
        new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
index aba9ead1384c036a0d6a441c92ced63cfd7ed4ae..9fadf6d7039b721b072379b5af51abce726f5b92 100644 (file)
@@ -287,6 +287,7 @@ retry:
                         * starvation.
                         */
                        mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       mm_flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }
        }
index 70e6ae1e700673e3acbd03452d22f57db9c1166d..373cb23301e30248bfd62f2a08c6529f93db0382 100644 (file)
@@ -73,4 +73,5 @@ generic-y += uaccess.h
 generic-y += ucontext.h
 generic-y += unaligned.h
 generic-y += vga.h
+generic-y += word-at-a-time.h
 generic-y += xor.h
index ac1662956e0c4d4dfe8a0a7fff432e9f2a592f2c..ab9f4e0ed4cfcfd48a8d232fe20d0482739a22c5 100644 (file)
@@ -7,4 +7,3 @@ generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += rwsem.h
 generic-y += vtime.h
-generic-y += word-at-a-time.h
index d4788111c16171135422a0ef29e23e2eb866236d..fac6ac9790fad18efc2f587757068f87ca7765fd 100644 (file)
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
 
 KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
 
index 0c98f1508542c9f900ee2bed1394413b8d5d8d88..ed7da281df66743f0badff631c9183bf318ec9b7 100644 (file)
@@ -381,7 +381,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index 82083e1fbdc4c6cc9f4ad6a2c0cfbfbcd3af1210..9858b14cde1edccdcda3a217446f547641d98944 100644 (file)
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index c05c9e0821e3bcd956b929c591e41b5445ac9565..7f14f80717d4975161a696dd2e803d4ee87011d6 100644 (file)
@@ -377,7 +377,7 @@ CONFIG_ISCSI_TCP=m
 CONFIG_SCSI_DEBUG=m
 CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=m
-CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH=y
 CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
index 2a0efc63b9e5afb29cb2e6edd109dd9848353b27..dc19ee0c92aaa693d2ad3b8c4c614b3e0e427de7 100644 (file)
@@ -19,7 +19,7 @@ int numa_pfn_to_nid(unsigned long pfn);
 int __node_distance(int a, int b);
 void numa_update_cpu_topology(void);
 
-extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 extern int numa_debug_enabled;
 
 #else
index 27ebde643933a908c1ebb2a75ff723d8d43a65f6..94fc55fc72ce88a18eb73d3f43d5a7895ac6cd9c 100644 (file)
@@ -68,7 +68,7 @@ static inline int cpu_to_node(int cpu)
 #define cpumask_of_node cpumask_of_node
 static inline const struct cpumask *cpumask_of_node(int node)
 {
-       return node_to_cpumask_map[node];
+       return &node_to_cpumask_map[node];
 }
 
 /*
index 48c9af7a76831ea63ef6ef92760df02f15c1188c..3aeeb1b562c00ff9c7afe559452fdc2c06457116 100644 (file)
@@ -176,6 +176,7 @@ int main(void)
        DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste));
        DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area));
        DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
+       DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset));
        DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
        DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
        DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
index 09b039d7983d802f2674504439e43e21c03d4cae..582fe44ab07cc69aaef1d4f782f6f89364914974 100644 (file)
@@ -733,6 +733,14 @@ ENTRY(psw_idle)
        stg     %r3,__SF_EMPTY(%r15)
        larl    %r1,.Lpsw_idle_lpsw+4
        stg     %r1,__SF_EMPTY+8(%r15)
+#ifdef CONFIG_SMP
+       larl    %r1,smp_cpu_mtid
+       llgf    %r1,0(%r1)
+       ltgr    %r1,%r1
+       jz      .Lpsw_idle_stcctm
+       .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
+.Lpsw_idle_stcctm:
+#endif
        STCK    __CLOCK_IDLE_ENTER(%r2)
        stpt    __TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
@@ -1159,7 +1167,27 @@ cleanup_critical:
        jhe     1f
        mvc     __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
        mvc     __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
-1:     # account system time going idle
+1:     # calculate idle cycles
+#ifdef CONFIG_SMP
+       clg     %r9,BASED(.Lcleanup_idle_insn)
+       jl      3f
+       larl    %r1,smp_cpu_mtid
+       llgf    %r1,0(%r1)
+       ltgr    %r1,%r1
+       jz      3f
+       .insn   rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
+       larl    %r3,mt_cycles
+       ag      %r3,__LC_PERCPU_OFFSET
+       la      %r4,__SF_EMPTY+16(%r15)
+2:     lg      %r0,0(%r3)
+       slg     %r0,0(%r4)
+       alg     %r0,64(%r4)
+       stg     %r0,0(%r3)
+       la      %r3,8(%r3)
+       la      %r4,8(%r4)
+       brct    %r1,2b
+#endif
+3:     # account system time going idle
        lg      %r9,__LC_STEAL_TIMER
        alg     %r9,__CLOCK_IDLE_ENTER(%r2)
        slg     %r9,__LC_LAST_UPDATE_CLOCK
index c8653435c70d9d203dbe05deed3c96d0aad6cdd9..dafc44f519c340329581c8a5b2fda6fdb6920252 100644 (file)
@@ -25,7 +25,7 @@ static DEFINE_SPINLOCK(virt_timer_lock);
 static atomic64_t virt_timer_current;
 static atomic64_t virt_timer_elapsed;
 
-static DEFINE_PER_CPU(u64, mt_cycles[32]);
+DEFINE_PER_CPU(u64, mt_cycles[8]);
 static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 };
 static DEFINE_PER_CPU(u64, mt_scaling_jiffies);
@@ -60,6 +60,34 @@ static inline int virt_timer_forward(u64 elapsed)
        return elapsed >= atomic64_read(&virt_timer_current);
 }
 
+static void update_mt_scaling(void)
+{
+       u64 cycles_new[8], *cycles_old;
+       u64 delta, fac, mult, div;
+       int i;
+
+       stcctm5(smp_cpu_mtid + 1, cycles_new);
+       cycles_old = this_cpu_ptr(mt_cycles);
+       fac = 1;
+       mult = div = 0;
+       for (i = 0; i <= smp_cpu_mtid; i++) {
+               delta = cycles_new[i] - cycles_old[i];
+               div += delta;
+               mult *= i + 1;
+               mult += delta * fac;
+               fac *= i + 1;
+       }
+       div *= fac;
+       if (div > 0) {
+               /* Update scaling factor */
+               __this_cpu_write(mt_scaling_mult, mult);
+               __this_cpu_write(mt_scaling_div, div);
+               memcpy(cycles_old, cycles_new,
+                      sizeof(u64) * (smp_cpu_mtid + 1));
+       }
+       __this_cpu_write(mt_scaling_jiffies, jiffies_64);
+}
+
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
@@ -69,7 +97,6 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        struct thread_info *ti = task_thread_info(tsk);
        u64 timer, clock, user, system, steal;
        u64 user_scaled, system_scaled;
-       int i;
 
        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
@@ -85,34 +112,10 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
-       /* Do MT utilization calculation */
+       /* Update MT utilization calculation */
        if (smp_cpu_mtid &&
-           time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) {
-               u64 cycles_new[32], *cycles_old;
-               u64 delta, fac, mult, div;
-
-               cycles_old = this_cpu_ptr(mt_cycles);
-               if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) {
-                       fac = 1;
-                       mult = div = 0;
-                       for (i = 0; i <= smp_cpu_mtid; i++) {
-                               delta = cycles_new[i] - cycles_old[i];
-                               div += delta;
-                               mult *= i + 1;
-                               mult += delta * fac;
-                               fac *= i + 1;
-                       }
-                       div *= fac;
-                       if (div > 0) {
-                               /* Update scaling factor */
-                               __this_cpu_write(mt_scaling_mult, mult);
-                               __this_cpu_write(mt_scaling_div, div);
-                               memcpy(cycles_old, cycles_new,
-                                      sizeof(u64) * (smp_cpu_mtid + 1));
-                       }
-               }
-               __this_cpu_write(mt_scaling_jiffies, jiffies_64);
-       }
+           time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+               update_mt_scaling();
 
        user = S390_lowcore.user_timer - ti->user_timer;
        S390_lowcore.steal_timer -= user;
@@ -181,6 +184,11 @@ void vtime_account_irq_enter(struct task_struct *tsk)
        S390_lowcore.last_update_timer = get_vtimer();
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 
+       /* Update MT utilization calculation */
+       if (smp_cpu_mtid &&
+           time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
+               update_mt_scaling();
+
        system = S390_lowcore.system_timer - ti->system_timer;
        S390_lowcore.steal_timer -= system;
        ti->system_timer = S390_lowcore.system_timer;
index 7de4e2f780d789478d4d700821944f96b3846586..30b2698a28e29a6991a7116da1877e5bdee1963e 100644 (file)
@@ -368,7 +368,7 @@ static void topology_add_core(struct toptree *core)
                cpumask_copy(&top->thread_mask, &core->mask);
                cpumask_copy(&top->core_mask, &core_mc(core)->mask);
                cpumask_copy(&top->book_mask, &core_book(core)->mask);
-               cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]);
+               cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
                top->node_id = core_node(core)->id;
        }
 }
@@ -383,7 +383,7 @@ static void toptree_to_topology(struct toptree *numa)
 
        /* Clear all node masks */
        for (i = 0; i < MAX_NUMNODES; i++)
-               cpumask_clear(node_to_cpumask_map[i]);
+               cpumask_clear(&node_to_cpumask_map[i]);
 
        /* Rebuild all masks */
        toptree_for_each(core, numa, CORE)
index 09b1d2355bd9849ab583bb52c33eb789b4f9804b..43f32ce60aa3d98af0b7665090fa3eb080d12fa7 100644 (file)
@@ -23,7 +23,7 @@
 pg_data_t *node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(node_data);
 
-cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+cpumask_t node_to_cpumask_map[MAX_NUMNODES];
 EXPORT_SYMBOL(node_to_cpumask_map);
 
 const struct numa_mode numa_mode_plain = {
@@ -144,7 +144,7 @@ void __init numa_setup(void)
 static int __init numa_init_early(void)
 {
        /* Attach all possible CPUs to node 0 for now. */
-       cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask);
+       cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
        return 0;
 }
 early_initcall(numa_init_early);
index 0b6cacaad9333a4165bfd9447a180feadedb4db4..ba35c41c71fff33b2b2fe95f566ad8b3dc192c32 100644 (file)
@@ -40,5 +40,4 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += trace_clock.h
 generic-y += types.h
-generic-y += word-at-a-time.h
 generic-y += xor.h
index 9e5ce0d7b292160d5f544fcda08c00ea6c04f168..b66a693c2c3453e4f4642fea133890ab268a32d8 100644 (file)
@@ -6,7 +6,7 @@
 struct word_at_a_time { /* unused */ };
 #define WORD_AT_A_TIME_CONSTANTS {}
 
-/* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */
+/* Generate 0x01 byte values for zero bytes using a SIMD instruction. */
 static inline unsigned long has_zero(unsigned long val, unsigned long *data,
                                     const struct word_at_a_time *c)
 {
@@ -33,4 +33,10 @@ static inline long find_zero(unsigned long mask)
 #endif
 }
 
+#ifdef __BIG_ENDIAN
+#define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask)))
+#else
+#define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1)
+#endif
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
index 83aea8055119e2f26beb1c909536609d30e88943..4c20dd333412db5b367d0625e9b7cf69a7891493 100644 (file)
@@ -336,10 +336,10 @@ HYPERVISOR_update_descriptor(u64 ma, u64 desc)
        return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
 }
 
-static inline int
+static inline long
 HYPERVISOR_memory_op(unsigned int cmd, void *arg)
 {
-       return _hypercall2(int, memory_op, cmd, arg);
+       return _hypercall2(long, memory_op, cmd, arg);
 }
 
 static inline int
index 30d12afe52ed173b2a81720cd5c89c24e667de2a..993b7a71386d53f79befa7a302ede2fdcbed6bd4 100644 (file)
 #include <linux/memblock.h>
 #include <linux/edd.h>
 
+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
 #include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
@@ -1077,6 +1081,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
                /* Fast syscall setup is all done in hypercalls, so
                   these are all ignored.  Stub them out here to stop
                   Xen console noise. */
+               break;
 
        default:
                if (!pmu_msr_write(msr, low, high, &ret))
@@ -1807,6 +1812,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
        .notifier_call  = xen_hvm_cpu_notify,
 };
 
+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+       native_machine_shutdown();
+       if (kexec_in_progress)
+               xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+       native_machine_crash_shutdown(regs);
+       xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
 static void __init xen_hvm_guest_init(void)
 {
        if (xen_pv_domain())
@@ -1826,6 +1846,10 @@ static void __init xen_hvm_guest_init(void)
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
        xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+       machine_ops.shutdown = xen_hvm_shutdown;
+       machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
 }
 #endif
 
index bfc08b13044b181c5948e5a2f22c205e900e0b47..660b3cfef23485f149e1a9b0b88f0b12666dbefb 100644 (file)
@@ -112,6 +112,15 @@ static unsigned long *p2m_identity;
 static pte_t *p2m_missing_pte;
 static pte_t *p2m_identity_pte;
 
+/*
+ * Hint at last populated PFN.
+ *
+ * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
+ * can avoid scanning the whole P2M (which may be sized to account for
+ * hotplugged memory).
+ */
+static unsigned long xen_p2m_last_pfn;
+
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
        BUG_ON(pfn >= MAX_P2M_PFN);
@@ -270,7 +279,7 @@ void xen_setup_mfn_list_list(void)
        else
                HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                        virt_to_mfn(p2m_top_mfn);
-       HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn;
+       HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
        HYPERVISOR_shared_info->arch.p2m_generation = 0;
        HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
        HYPERVISOR_shared_info->arch.p2m_cr3 =
@@ -406,6 +415,8 @@ void __init xen_vmalloc_p2m_tree(void)
        static struct vm_struct vm;
        unsigned long p2m_limit;
 
+       xen_p2m_last_pfn = xen_max_p2m_pfn;
+
        p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
        vm.flags = VM_ALLOC;
        vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
@@ -608,6 +619,12 @@ static bool alloc_p2m(unsigned long pfn)
                        free_p2m_page(p2m);
        }
 
+       /* Expanded the p2m? */
+       if (pfn > xen_p2m_last_pfn) {
+               xen_p2m_last_pfn = pfn;
+               HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
+       }
+
        return true;
 }
 
index f5ef6746d47a0ee36f6b0a11edd0c49cbcf3590a..1c30e4ab1022bda71ff80d841509605ae07034cc 100644 (file)
@@ -548,7 +548,7 @@ static unsigned long __init xen_get_max_pages(void)
 {
        unsigned long max_pages, limit;
        domid_t domid = DOMID_SELF;
-       int ret;
+       long ret;
 
        limit = xen_get_pages_limit();
        max_pages = limit;
@@ -798,7 +798,7 @@ char * __init xen_memory_setup(void)
                xen_ignore_unusable();
 
        /* Make sure the Xen-supplied memory map is well-ordered. */
-       sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
+       sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
                          &xen_e820_map_entries);
 
        max_pages = xen_get_max_pages();
index 14b8faf8b09d48937985713e10ed25745aad2dc2..f6325d573c10a42c673f844e4fb9585a0d9bd292 100644 (file)
 static struct kmem_cache *bip_slab;
 static struct workqueue_struct *kintegrityd_wq;
 
+void blk_flush_integrity(void)
+{
+       flush_workqueue(kintegrityd_wq);
+}
+
 /**
  * bio_integrity_alloc - Allocate integrity payload and attach it to bio
  * @bio:       bio to attach integrity metadata to
@@ -177,11 +182,11 @@ bool bio_integrity_enabled(struct bio *bio)
        if (bi == NULL)
                return false;
 
-       if (bio_data_dir(bio) == READ && bi->verify_fn != NULL &&
+       if (bio_data_dir(bio) == READ && bi->profile->verify_fn != NULL &&
            (bi->flags & BLK_INTEGRITY_VERIFY))
                return true;
 
-       if (bio_data_dir(bio) == WRITE && bi->generate_fn != NULL &&
+       if (bio_data_dir(bio) == WRITE && bi->profile->generate_fn != NULL &&
            (bi->flags & BLK_INTEGRITY_GENERATE))
                return true;
 
@@ -202,7 +207,7 @@ EXPORT_SYMBOL(bio_integrity_enabled);
 static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
                                                   unsigned int sectors)
 {
-       return sectors >> (ilog2(bi->interval) - 9);
+       return sectors >> (bi->interval_exp - 9);
 }
 
 static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
@@ -229,7 +234,7 @@ static int bio_integrity_process(struct bio *bio,
                bip->bip_vec->bv_offset;
 
        iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
-       iter.interval = bi->interval;
+       iter.interval = 1 << bi->interval_exp;
        iter.seed = bip_get_seed(bip);
        iter.prot_buf = prot_buf;
 
@@ -340,7 +345,7 @@ int bio_integrity_prep(struct bio *bio)
 
        /* Auto-generate integrity metadata if this is a write */
        if (bio_data_dir(bio) == WRITE)
-               bio_integrity_process(bio, bi->generate_fn);
+               bio_integrity_process(bio, bi->profile->generate_fn);
 
        return 0;
 }
@@ -361,7 +366,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
        struct bio *bio = bip->bip_bio;
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 
-       bio->bi_error = bio_integrity_process(bio, bi->verify_fn);
+       bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);
 
        /* Restore original bio completion handler */
        bio->bi_end_io = bip->bip_end_io;
index 2eb722d48773cb8a8de49d58b934eed830755da7..16bb626ff8c849b3f15886bdc26889beea3b3892 100644 (file)
@@ -554,22 +554,23 @@ void blk_cleanup_queue(struct request_queue *q)
         * Drain all requests queued before DYING marking. Set DEAD flag to
         * prevent that q->request_fn() gets invoked after draining finished.
         */
-       if (q->mq_ops) {
-               blk_mq_freeze_queue(q);
-               spin_lock_irq(lock);
-       } else {
-               spin_lock_irq(lock);
+       blk_freeze_queue(q);
+       spin_lock_irq(lock);
+       if (!q->mq_ops)
                __blk_drain_queue(q, true);
-       }
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
 
+       /* for synchronous bio-based drivers, finish in-flight integrity I/O */
+       blk_flush_integrity();
+
        /* @q won't process any more request, flush async actions */
        del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
        blk_sync_queue(q);
 
        if (q->mq_ops)
                blk_mq_free_queue(q);
+       percpu_ref_exit(&q->q_usage_counter);
 
        spin_lock_irq(lock);
        if (q->queue_lock != &q->__queue_lock)
@@ -629,6 +630,40 @@ struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+int blk_queue_enter(struct request_queue *q, gfp_t gfp)
+{
+       while (true) {
+               int ret;
+
+               if (percpu_ref_tryget_live(&q->q_usage_counter))
+                       return 0;
+
+               if (!(gfp & __GFP_WAIT))
+                       return -EBUSY;
+
+               ret = wait_event_interruptible(q->mq_freeze_wq,
+                               !atomic_read(&q->mq_freeze_depth) ||
+                               blk_queue_dying(q));
+               if (blk_queue_dying(q))
+                       return -ENODEV;
+               if (ret)
+                       return ret;
+       }
+}
+
+void blk_queue_exit(struct request_queue *q)
+{
+       percpu_ref_put(&q->q_usage_counter);
+}
+
+static void blk_queue_usage_counter_release(struct percpu_ref *ref)
+{
+       struct request_queue *q =
+               container_of(ref, struct request_queue, q_usage_counter);
+
+       wake_up_all(&q->mq_freeze_wq);
+}
+
 struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
        struct request_queue *q;
@@ -690,11 +725,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        init_waitqueue_head(&q->mq_freeze_wq);
 
-       if (blkcg_init_queue(q))
+       /*
+        * Init percpu_ref in atomic mode so that it's faster to shutdown.
+        * See blk_register_queue() for details.
+        */
+       if (percpu_ref_init(&q->q_usage_counter,
+                               blk_queue_usage_counter_release,
+                               PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
                goto fail_bdi;
 
+       if (blkcg_init_queue(q))
+               goto fail_ref;
+
        return q;
 
+fail_ref:
+       percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
        bdi_destroy(&q->backing_dev_info);
 fail_split:
@@ -1594,6 +1640,30 @@ out:
        return ret;
 }
 
+unsigned int blk_plug_queued_count(struct request_queue *q)
+{
+       struct blk_plug *plug;
+       struct request *rq;
+       struct list_head *plug_list;
+       unsigned int ret = 0;
+
+       plug = current->plug;
+       if (!plug)
+               goto out;
+
+       if (q->mq_ops)
+               plug_list = &plug->mq_list;
+       else
+               plug_list = &plug->list;
+
+       list_for_each_entry(rq, plug_list, queuelist) {
+               if (rq->q == q)
+                       ret++;
+       }
+out:
+       return ret;
+}
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
        req->cmd_type = REQ_TYPE_FS;
@@ -1641,9 +1711,11 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
-       if (!blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, NULL))
-               return;
+       if (!blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+                       return;
+       } else
+               request_count = blk_plug_queued_count(q);
 
        spin_lock_irq(q->queue_lock);
 
@@ -1966,9 +2038,19 @@ void generic_make_request(struct bio *bio)
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
-               q->make_request_fn(q, bio);
+               if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+
+                       q->make_request_fn(q, bio);
+
+                       blk_queue_exit(q);
 
-               bio = bio_list_pop(current->bio_list);
+                       bio = bio_list_pop(current->bio_list);
+               } else {
+                       struct bio *bio_next = bio_list_pop(current->bio_list);
+
+                       bio_io_error(bio);
+                       bio = bio_next;
+               }
        } while (bio);
        current->bio_list = NULL; /* deactivate */
 }
index 75f29cf701889a5711cad9e63f041bd98c74022f..d69c5c79f98e71059827265531aba75a63458f74 100644 (file)
 
 #include "blk.h"
 
-static struct kmem_cache *integrity_cachep;
-
-static const char *bi_unsupported_name = "unsupported";
-
 /**
  * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
  * @q:         request queue
@@ -146,40 +142,40 @@ EXPORT_SYMBOL(blk_rq_map_integrity_sg);
  */
 int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2)
 {
-       struct blk_integrity *b1 = gd1->integrity;
-       struct blk_integrity *b2 = gd2->integrity;
+       struct blk_integrity *b1 = &gd1->queue->integrity;
+       struct blk_integrity *b2 = &gd2->queue->integrity;
 
-       if (!b1 && !b2)
+       if (!b1->profile && !b2->profile)
                return 0;
 
-       if (!b1 || !b2)
+       if (!b1->profile || !b2->profile)
                return -1;
 
-       if (b1->interval != b2->interval) {
+       if (b1->interval_exp != b2->interval_exp) {
                pr_err("%s: %s/%s protection interval %u != %u\n",
                       __func__, gd1->disk_name, gd2->disk_name,
-                      b1->interval, b2->interval);
+                      1 << b1->interval_exp, 1 << b2->interval_exp);
                return -1;
        }
 
        if (b1->tuple_size != b2->tuple_size) {
-               printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__,
+               pr_err("%s: %s/%s tuple sz %u != %u\n", __func__,
                       gd1->disk_name, gd2->disk_name,
                       b1->tuple_size, b2->tuple_size);
                return -1;
        }
 
        if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) {
-               printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__,
+               pr_err("%s: %s/%s tag sz %u != %u\n", __func__,
                       gd1->disk_name, gd2->disk_name,
                       b1->tag_size, b2->tag_size);
                return -1;
        }
 
-       if (strcmp(b1->name, b2->name)) {
-               printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__,
+       if (b1->profile != b2->profile) {
+               pr_err("%s: %s/%s type %s != %s\n", __func__,
                       gd1->disk_name, gd2->disk_name,
-                      b1->name, b2->name);
+                      b1->profile->name, b2->profile->name);
                return -1;
        }
 
@@ -249,8 +245,8 @@ struct integrity_sysfs_entry {
 static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr,
                                   char *page)
 {
-       struct blk_integrity *bi =
-               container_of(kobj, struct blk_integrity, kobj);
+       struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
+       struct blk_integrity *bi = &disk->queue->integrity;
        struct integrity_sysfs_entry *entry =
                container_of(attr, struct integrity_sysfs_entry, attr);
 
@@ -261,8 +257,8 @@ static ssize_t integrity_attr_store(struct kobject *kobj,
                                    struct attribute *attr, const char *page,
                                    size_t count)
 {
-       struct blk_integrity *bi =
-               container_of(kobj, struct blk_integrity, kobj);
+       struct gendisk *disk = container_of(kobj, struct gendisk, integrity_kobj);
+       struct blk_integrity *bi = &disk->queue->integrity;
        struct integrity_sysfs_entry *entry =
                container_of(attr, struct integrity_sysfs_entry, attr);
        ssize_t ret = 0;
@@ -275,18 +271,21 @@ static ssize_t integrity_attr_store(struct kobject *kobj,
 
 static ssize_t integrity_format_show(struct blk_integrity *bi, char *page)
 {
-       if (bi != NULL && bi->name != NULL)
-               return sprintf(page, "%s\n", bi->name);
+       if (bi->profile && bi->profile->name)
+               return sprintf(page, "%s\n", bi->profile->name);
        else
                return sprintf(page, "none\n");
 }
 
 static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page)
 {
-       if (bi != NULL)
-               return sprintf(page, "%u\n", bi->tag_size);
-       else
-               return sprintf(page, "0\n");
+       return sprintf(page, "%u\n", bi->tag_size);
+}
+
+static ssize_t integrity_interval_show(struct blk_integrity *bi, char *page)
+{
+       return sprintf(page, "%u\n",
+                      bi->interval_exp ? 1 << bi->interval_exp : 0);
 }
 
 static ssize_t integrity_verify_store(struct blk_integrity *bi,
@@ -343,6 +342,11 @@ static struct integrity_sysfs_entry integrity_tag_size_entry = {
        .show = integrity_tag_size_show,
 };
 
+static struct integrity_sysfs_entry integrity_interval_entry = {
+       .attr = { .name = "protection_interval_bytes", .mode = S_IRUGO },
+       .show = integrity_interval_show,
+};
+
 static struct integrity_sysfs_entry integrity_verify_entry = {
        .attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR },
        .show = integrity_verify_show,
@@ -363,6 +367,7 @@ static struct integrity_sysfs_entry integrity_device_entry = {
 static struct attribute *integrity_attrs[] = {
        &integrity_format_entry.attr,
        &integrity_tag_size_entry.attr,
+       &integrity_interval_entry.attr,
        &integrity_verify_entry.attr,
        &integrity_generate_entry.attr,
        &integrity_device_entry.attr,
@@ -374,114 +379,89 @@ static const struct sysfs_ops integrity_ops = {
        .store  = &integrity_attr_store,
 };
 
-static int __init blk_dev_integrity_init(void)
-{
-       integrity_cachep = kmem_cache_create("blkdev_integrity",
-                                            sizeof(struct blk_integrity),
-                                            0, SLAB_PANIC, NULL);
-       return 0;
-}
-subsys_initcall(blk_dev_integrity_init);
-
-static void blk_integrity_release(struct kobject *kobj)
-{
-       struct blk_integrity *bi =
-               container_of(kobj, struct blk_integrity, kobj);
-
-       kmem_cache_free(integrity_cachep, bi);
-}
-
 static struct kobj_type integrity_ktype = {
        .default_attrs  = integrity_attrs,
        .sysfs_ops      = &integrity_ops,
-       .release        = blk_integrity_release,
 };
 
-bool blk_integrity_is_initialized(struct gendisk *disk)
+static int blk_integrity_nop_fn(struct blk_integrity_iter *iter)
 {
-       struct blk_integrity *bi = blk_get_integrity(disk);
-
-       return (bi && bi->name && strcmp(bi->name, bi_unsupported_name) != 0);
+       return 0;
 }
-EXPORT_SYMBOL(blk_integrity_is_initialized);
+
+static struct blk_integrity_profile nop_profile = {
+       .name = "nop",
+       .generate_fn = blk_integrity_nop_fn,
+       .verify_fn = blk_integrity_nop_fn,
+};
 
 /**
  * blk_integrity_register - Register a gendisk as being integrity-capable
  * @disk:      struct gendisk pointer to make integrity-aware
- * @template:  optional integrity profile to register
+ * @template:  block integrity profile to register
  *
- * Description: When a device needs to advertise itself as being able
- * to send/receive integrity metadata it must use this function to
- * register the capability with the block layer.  The template is a
- * blk_integrity struct with values appropriate for the underlying
- * hardware.  If template is NULL the new profile is allocated but
- * not filled out. See Documentation/block/data-integrity.txt.
+ * Description: When a device needs to advertise itself as being able to
+ * send/receive integrity metadata it must use this function to register
+ * the capability with the block layer. The template is a blk_integrity
+ * struct with values appropriate for the underlying hardware. See
+ * Documentation/block/data-integrity.txt.
  */
-int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
+void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 {
-       struct blk_integrity *bi;
+       struct blk_integrity *bi = &disk->queue->integrity;
 
-       BUG_ON(disk == NULL);
+       bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
+               template->flags;
+       bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
+       bi->profile = template->profile ? template->profile : &nop_profile;
+       bi->tuple_size = template->tuple_size;
+       bi->tag_size = template->tag_size;
 
-       if (disk->integrity == NULL) {
-               bi = kmem_cache_alloc(integrity_cachep,
-                                     GFP_KERNEL | __GFP_ZERO);
-               if (!bi)
-                       return -1;
-
-               if (kobject_init_and_add(&bi->kobj, &integrity_ktype,
-                                        &disk_to_dev(disk)->kobj,
-                                        "%s", "integrity")) {
-                       kmem_cache_free(integrity_cachep, bi);
-                       return -1;
-               }
-
-               kobject_uevent(&bi->kobj, KOBJ_ADD);
-
-               bi->flags |= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE;
-               bi->interval = queue_logical_block_size(disk->queue);
-               disk->integrity = bi;
-       } else
-               bi = disk->integrity;
-
-       /* Use the provided profile as template */
-       if (template != NULL) {
-               bi->name = template->name;
-               bi->generate_fn = template->generate_fn;
-               bi->verify_fn = template->verify_fn;
-               bi->tuple_size = template->tuple_size;
-               bi->tag_size = template->tag_size;
-               bi->flags |= template->flags;
-       } else
-               bi->name = bi_unsupported_name;
-
-       disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
-
-       return 0;
+       blk_integrity_revalidate(disk);
 }
 EXPORT_SYMBOL(blk_integrity_register);
 
 /**
- * blk_integrity_unregister - Remove block integrity profile
- * @disk:      disk whose integrity profile to deallocate
+ * blk_integrity_unregister - Unregister block integrity profile
+ * @disk:      disk whose integrity profile to unregister
  *
- * Description: This function frees all memory used by the block
- * integrity profile.  To be called at device teardown.
+ * Description: This function unregisters the integrity capability from
+ * a block device.
  */
 void blk_integrity_unregister(struct gendisk *disk)
 {
-       struct blk_integrity *bi;
+       blk_integrity_revalidate(disk);
+       memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
+}
+EXPORT_SYMBOL(blk_integrity_unregister);
+
+void blk_integrity_revalidate(struct gendisk *disk)
+{
+       struct blk_integrity *bi = &disk->queue->integrity;
 
-       if (!disk || !disk->integrity)
+       if (!(disk->flags & GENHD_FL_UP))
                return;
 
-       disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+       if (bi->profile)
+               disk->queue->backing_dev_info.capabilities |=
+                       BDI_CAP_STABLE_WRITES;
+       else
+               disk->queue->backing_dev_info.capabilities &=
+                       ~BDI_CAP_STABLE_WRITES;
+}
+
+void blk_integrity_add(struct gendisk *disk)
+{
+       if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
+                                &disk_to_dev(disk)->kobj, "%s", "integrity"))
+               return;
 
-       bi = disk->integrity;
+       kobject_uevent(&disk->integrity_kobj, KOBJ_ADD);
+}
 
-       kobject_uevent(&bi->kobj, KOBJ_REMOVE);
-       kobject_del(&bi->kobj);
-       kobject_put(&bi->kobj);
-       disk->integrity = NULL;
+void blk_integrity_del(struct gendisk *disk)
+{
+       kobject_uevent(&disk->integrity_kobj, KOBJ_REMOVE);
+       kobject_del(&disk->integrity_kobj);
+       kobject_put(&disk->integrity_kobj);
 }
-EXPORT_SYMBOL(blk_integrity_unregister);
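
With the profile now embedded in the request_queue, a driver no longer
allocates or frees anything: it fills a template on the stack and
blk_integrity_register() copies it. A hedged driver-side sketch,
assuming the t10_pi_type1_crc profile that block/t10-pi.c exports in
this series:

    #include <linux/blkdev.h>
    #include <linux/genhd.h>
    #include <linux/t10-pi.h>

    static void example_enable_t10_pi(struct gendisk *disk)
    {
            struct blk_integrity bi = {
                    .profile    = &t10_pi_type1_crc,
                    .tuple_size = sizeof(struct t10_pi_tuple),
            };

            /* interval_exp is filled in from the queue's logical
             * block size inside blk_integrity_register() */
            blk_integrity_register(disk, &bi);
    }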
index c4e9c37f3e38122e5125502d62ab1ddd2e887408..de5716d8e525969e7849767a775aabec9e4d8b96 100644 (file)
 
 static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
-                                        struct bio_set *bs)
+                                        struct bio_set *bs,
+                                        unsigned *nsegs)
 {
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;
 
+       *nsegs = 1;
+
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
 
@@ -51,8 +54,11 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
 
 static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
-                                           struct bio_set *bs)
+                                           struct bio_set *bs,
+                                           unsigned *nsegs)
 {
+       *nsegs = 1;
+
        if (!q->limits.max_write_same_sectors)
                return NULL;
 
@@ -64,7 +70,8 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
 
 static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
-                                        struct bio_set *bs)
+                                        struct bio_set *bs,
+                                        unsigned *segs)
 {
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
@@ -106,24 +113,35 @@ new_segment:
                sectors += bv.bv_len >> 9;
        }
 
+       *segs = nsegs;
        return NULL;
 split:
+       *segs = nsegs;
        return bio_split(bio, sectors, GFP_NOIO, bs);
 }
 
 void blk_queue_split(struct request_queue *q, struct bio **bio,
                     struct bio_set *bs)
 {
-       struct bio *split;
+       struct bio *split, *res;
+       unsigned nsegs;
 
        if ((*bio)->bi_rw & REQ_DISCARD)
-               split = blk_bio_discard_split(q, *bio, bs);
+               split = blk_bio_discard_split(q, *bio, bs, &nsegs);
        else if ((*bio)->bi_rw & REQ_WRITE_SAME)
-               split = blk_bio_write_same_split(q, *bio, bs);
+               split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
        else
-               split = blk_bio_segment_split(q, *bio, q->bio_split);
+               split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
+
+       /* physical segments can be figured out during splitting */
+       res = split ? split : *bio;
+       res->bi_phys_segments = nsegs;
+       bio_set_flag(res, BIO_SEG_VALID);
 
        if (split) {
+               /* there is no chance to merge the split bio */
+               split->bi_rw |= REQ_NOMERGE;
+
                bio_chain(split, *bio);
                generic_make_request(*bio);
                *bio = split;
index 788fffd9b4098e35a953ed8cc182a9633f9cc421..6f57a110289c54c8e293b00aad0b42acb55ed6fc 100644 (file)
@@ -413,12 +413,6 @@ static void blk_mq_sysfs_init(struct request_queue *q)
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
 }
 
-/* see blk_register_queue() */
-void blk_mq_finish_init(struct request_queue *q)
-{
-       percpu_ref_switch_to_percpu(&q->mq_usage_counter);
-}
-
 int blk_mq_register_disk(struct gendisk *disk)
 {
        struct device *dev = disk_to_dev(disk);
index ed96474d75cb62fb261526736727c67ea2238d46..7a6b6e27fc26faca87db30c43b7caa6bd179b230 100644 (file)
@@ -75,6 +75,10 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
        struct blk_mq_bitmap_tags *bt;
        int i, wake_index;
 
+       /*
+        * Make sure all changes prior to this are visible from other CPUs.
+        */
+       smp_mb();
        bt = &tags->bitmap_tags;
        wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
index d921cd5177f542f24fdcc052ce1e53e0c310c758..9e6922ded60a9327f19109cd724124009e2f595c 100644 (file)
@@ -78,47 +78,13 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
        clear_bit(CTX_TO_BIT(hctx, ctx), &bm->word);
 }
 
-static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
-{
-       while (true) {
-               int ret;
-
-               if (percpu_ref_tryget_live(&q->mq_usage_counter))
-                       return 0;
-
-               if (!(gfp & __GFP_WAIT))
-                       return -EBUSY;
-
-               ret = wait_event_interruptible(q->mq_freeze_wq,
-                               !atomic_read(&q->mq_freeze_depth) ||
-                               blk_queue_dying(q));
-               if (blk_queue_dying(q))
-                       return -ENODEV;
-               if (ret)
-                       return ret;
-       }
-}
-
-static void blk_mq_queue_exit(struct request_queue *q)
-{
-       percpu_ref_put(&q->mq_usage_counter);
-}
-
-static void blk_mq_usage_counter_release(struct percpu_ref *ref)
-{
-       struct request_queue *q =
-               container_of(ref, struct request_queue, mq_usage_counter);
-
-       wake_up_all(&q->mq_freeze_wq);
-}
-
 void blk_mq_freeze_queue_start(struct request_queue *q)
 {
        int freeze_depth;
 
        freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
        if (freeze_depth == 1) {
-               percpu_ref_kill(&q->mq_usage_counter);
+               percpu_ref_kill(&q->q_usage_counter);
                blk_mq_run_hw_queues(q, false);
        }
 }
@@ -126,18 +92,34 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
-       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+       wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
 }
 
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
  */
-void blk_mq_freeze_queue(struct request_queue *q)
+void blk_freeze_queue(struct request_queue *q)
 {
+       /*
+        * In the !blk_mq case we are only calling this to kill the
+        * q_usage_counter, otherwise this increases the freeze depth
+        * and waits for it to return to zero.  For this reason there is
+        * no blk_unfreeze_queue(), and blk_freeze_queue() is not
+        * exported to drivers as the only user for unfreeze is blk_mq.
+        */
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
 }
+
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+       /*
+        * ...just an alias to keep freeze and unfreeze actions balanced
+        * in the blk_mq_* namespace
+        */
+       blk_freeze_queue(q);
+}
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -147,7 +129,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
        freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
        WARN_ON_ONCE(freeze_depth < 0);
        if (!freeze_depth) {
-               percpu_ref_reinit(&q->mq_usage_counter);
+               percpu_ref_reinit(&q->q_usage_counter);
                wake_up_all(&q->mq_freeze_wq);
        }
 }
@@ -256,7 +238,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
        struct blk_mq_alloc_data alloc_data;
        int ret;
 
-       ret = blk_mq_queue_enter(q, gfp);
+       ret = blk_queue_enter(q, gfp);
        if (ret)
                return ERR_PTR(ret);
 
@@ -279,7 +261,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
        }
        blk_mq_put_ctx(ctx);
        if (!rq) {
-               blk_mq_queue_exit(q);
+               blk_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
        }
        return rq;
@@ -298,7 +280,7 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
 
        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);
-       blk_mq_queue_exit(q);
+       blk_queue_exit(q);
 }
 
 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
@@ -990,18 +972,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
-static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-                                   struct request *rq, bool at_head)
+static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+                                           struct blk_mq_ctx *ctx,
+                                           struct request *rq,
+                                           bool at_head)
 {
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
-
        trace_block_rq_insert(hctx->queue, rq);
 
        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);
+}
 
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+                                   struct request *rq, bool at_head)
+{
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+
+       __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
@@ -1057,8 +1046,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->mq_ctx = ctx;
-               __blk_mq_insert_request(hctx, rq, false);
+               __blk_mq_insert_req_list(hctx, ctx, rq, false);
        }
+       blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
 
        blk_mq_run_hw_queue(hctx, from_schedule);
@@ -1140,7 +1130,7 @@ static inline bool blk_mq_merge_queue_io(struct blk_mq_hw_ctx *hctx,
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, struct bio *bio)
 {
-       if (!hctx_allow_merges(hctx)) {
+       if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {
                blk_mq_bio_to_request(rq, bio);
                spin_lock(&ctx->lock);
 insert_rq:
@@ -1177,11 +1167,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
        int rw = bio_data_dir(bio);
        struct blk_mq_alloc_data alloc_data;
 
-       if (unlikely(blk_mq_queue_enter(q, GFP_KERNEL))) {
-               bio_io_error(bio);
-               return NULL;
-       }
-
+       blk_queue_enter_live(q);
        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
@@ -1268,9 +1254,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
-               return;
+       if (!is_flush_fua && !blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count,
+                                          &same_queue_rq))
+                       return;
+       } else {
+               request_count = blk_plug_queued_count(q);
+       }
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
@@ -1377,7 +1366,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
        plug = current->plug;
        if (plug) {
                blk_mq_bio_to_request(rq, bio);
-               if (list_empty(&plug->mq_list))
+               if (!request_count)
                        trace_block_plug(q);
                else if (request_count >= BLK_MAX_REQUEST_COUNT) {
                        blk_flush_plug_list(plug, false);
@@ -2000,14 +1989,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                hctxs[i]->queue_num = i;
        }
 
-       /*
-        * Init percpu_ref in atomic mode so that it's faster to shutdown.
-        * See blk_register_queue() for details.
-        */
-       if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
-                           PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto err_hctxs;
-
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -2088,8 +2069,6 @@ void blk_mq_free_queue(struct request_queue *q)
 
        blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
        blk_mq_free_hw_queues(q, set);
-
-       percpu_ref_exit(&q->mq_usage_counter);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
index f4fea79649105b4e134860b53294ef2dac90a95f..b44dce165761268c1f0a6bd64db78451f68d912f 100644 (file)
@@ -29,8 +29,6 @@ void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
-void blk_mq_clone_flush_request(struct request *flush_rq,
-               struct request *orig_rq);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 
index 3e44a9da2a13579cacaee3d5e03bb3868f34d02a..61fc2633bbeabf25cb3292290f79c9d16fb089f3 100644 (file)
@@ -599,9 +599,8 @@ int blk_register_queue(struct gendisk *disk)
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+               percpu_ref_switch_to_percpu(&q->q_usage_counter);
                blk_queue_bypass_end(q);
-               if (q->mq_ops)
-                       blk_mq_finish_init(q);
        }
 
        ret = blk_trace_init_sysfs(dev);
index 98614ad37c81f22e175d9455967f9f6a98b78676..da722eb786df6afd6ff0e567024fe2f7b02488e7 100644 (file)
@@ -72,6 +72,28 @@ void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
                            unsigned int nr_bytes, unsigned int bidi_bytes);
+int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+void blk_queue_exit(struct request_queue *q);
+void blk_freeze_queue(struct request_queue *q);
+
+static inline void blk_queue_enter_live(struct request_queue *q)
+{
+       /*
+        * Given that running in generic_make_request() context
+        * guarantees that a live reference against q_usage_counter has
+        * been established, further references under that same context
+        * need not check that the queue has been frozen (marked dead).
+        */
+       percpu_ref_get(&q->q_usage_counter);
+}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+void blk_flush_integrity(void);
+#else
+static inline void blk_flush_integrity(void)
+{
+}
+#endif
 
 void blk_rq_timed_out_timer(unsigned long data);
 unsigned long blk_rq_timeout(unsigned long timeout);
@@ -86,6 +108,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                            unsigned int *request_count,
                            struct request **same_queue_rq);
+unsigned int blk_plug_queued_count(struct request_queue *q);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
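
The blk_queue_enter()/blk_queue_exit() declarations above generalize what used
to be mq-only gatekeeping: a submitter holds a q_usage_counter reference while
a request is set up, and freezing simply kills the reference and waits for it
to drain.  A sketch of the submission-side pattern, with the allocation step
reduced to a hypothetical helper (error values mirror blk_mq_alloc_request()
earlier in this diff):

	ret = blk_queue_enter(q, gfp);		/* fails once the ref is killed */
	if (ret)
		return ERR_PTR(ret);
	rq = set_up_request(q);			/* hypothetical tag/ctx work */
	if (!rq) {
		blk_queue_exit(q);		/* last exit wakes the freezer */
		return ERR_PTR(-EWOULDBLOCK);
	}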
index 84d63943f2de2f386ff35e6a395f68ada173b5b2..c3555c9c672f94c1f13c3cd3c75c037e7c8110a7 100644 (file)
@@ -420,7 +420,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
-       if (blk_queue_nomerges(q))
+       if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;
 
        /*
index 0c706f33a599a723fc56b0ef87bb58a71de2cd66..e5cafa51567c9d589147523c8ab7b43504f9d725 100644 (file)
@@ -630,6 +630,7 @@ void add_disk(struct gendisk *disk)
        WARN_ON(retval);
 
        disk_add_events(disk);
+       blk_integrity_add(disk);
 }
 EXPORT_SYMBOL(add_disk);
 
@@ -638,6 +639,7 @@ void del_gendisk(struct gendisk *disk)
        struct disk_part_iter piter;
        struct hd_struct *part;
 
+       blk_integrity_del(disk);
        disk_del_events(disk);
 
        /* invalidate stuff */
index 8061eba42887a9c1163a8d2271517b8b061ce554..0918aed2d847e2ccd1d696ba8260be9592959063 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/backing-dev.h>
 #include <linux/fs.h>
 #include <linux/blktrace_api.h>
+#include <linux/pr.h>
 #include <asm/uaccess.h>
 
 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -193,10 +194,20 @@ int blkdev_reread_part(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blkdev_reread_part);
 
-static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
-                            uint64_t len, int secure)
+static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
+               unsigned long arg, unsigned long flags)
 {
-       unsigned long flags = 0;
+       uint64_t range[2];
+       uint64_t start, len;
+
+       if (!(mode & FMODE_WRITE))
+               return -EBADF;
+
+       if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+               return -EFAULT;
+
+       start = range[0];
+       len = range[1];
 
        if (start & 511)
                return -EINVAL;
@@ -207,14 +218,24 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 
        if (start + len > (i_size_read(bdev->bd_inode) >> 9))
                return -EINVAL;
-       if (secure)
-               flags |= BLKDEV_DISCARD_SECURE;
        return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
 }
 
-static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start,
-                            uint64_t len)
+static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
+               unsigned long arg)
 {
+       uint64_t range[2];
+       uint64_t start, len;
+
+       if (!(mode & FMODE_WRITE))
+               return -EBADF;
+
+       if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+               return -EFAULT;
+
+       start = range[0];
+       len = range[1];
+
        if (start & 511)
                return -EINVAL;
        if (len & 511)
@@ -275,6 +296,96 @@ int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
  */
 EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
 
+static int blkdev_pr_register(struct block_device *bdev,
+               struct pr_registration __user *arg)
+{
+       const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+       struct pr_registration reg;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       if (!ops || !ops->pr_register)
+               return -EOPNOTSUPP;
+       if (copy_from_user(&reg, arg, sizeof(reg)))
+               return -EFAULT;
+
+       if (reg.flags & ~PR_FL_IGNORE_KEY)
+               return -EOPNOTSUPP;
+       return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);
+}
+
+static int blkdev_pr_reserve(struct block_device *bdev,
+               struct pr_reservation __user *arg)
+{
+       const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+       struct pr_reservation rsv;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       if (!ops || !ops->pr_reserve)
+               return -EOPNOTSUPP;
+       if (copy_from_user(&rsv, arg, sizeof(rsv)))
+               return -EFAULT;
+
+       if (rsv.flags & ~PR_FL_IGNORE_KEY)
+               return -EOPNOTSUPP;
+       return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);
+}
+
+static int blkdev_pr_release(struct block_device *bdev,
+               struct pr_reservation __user *arg)
+{
+       const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+       struct pr_reservation rsv;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       if (!ops || !ops->pr_release)
+               return -EOPNOTSUPP;
+       if (copy_from_user(&rsv, arg, sizeof(rsv)))
+               return -EFAULT;
+
+       if (rsv.flags)
+               return -EOPNOTSUPP;
+       return ops->pr_release(bdev, rsv.key, rsv.type);
+}
+
+static int blkdev_pr_preempt(struct block_device *bdev,
+               struct pr_preempt __user *arg, bool abort)
+{
+       const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+       struct pr_preempt p;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       if (!ops || !ops->pr_preempt)
+               return -EOPNOTSUPP;
+       if (copy_from_user(&p, arg, sizeof(p)))
+               return -EFAULT;
+
+       if (p.flags)
+               return -EOPNOTSUPP;
+       return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);
+}
+
+static int blkdev_pr_clear(struct block_device *bdev,
+               struct pr_clear __user *arg)
+{
+       const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
+       struct pr_clear c;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       if (!ops || !ops->pr_clear)
+               return -EOPNOTSUPP;
+       if (copy_from_user(&c, arg, sizeof(c)))
+               return -EFAULT;
+
+       if (c.flags)
+               return -EOPNOTSUPP;
+       return ops->pr_clear(bdev, c.key);
+}
+
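
The five helpers above are the kernel side of the new persistent-reservation
API.  A minimal user-space sketch, assuming the IOC_PR_* ioctls and structures
from the new include/uapi/linux/pr.h; "/dev/sdX" is a placeholder device node:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/pr.h>

	int main(void)
	{
		struct pr_registration reg = { .old_key = 0, .new_key = 0x123abc };
		struct pr_reservation rsv = { .key = 0x123abc,
					      .type = PR_WRITE_EXCLUSIVE };
		int fd = open("/dev/sdX", O_RDWR);	/* placeholder */

		if (fd < 0)
			return 1;
		if (ioctl(fd, IOC_PR_REGISTER, &reg))	/* needs CAP_SYS_ADMIN */
			perror("IOC_PR_REGISTER");
		else if (ioctl(fd, IOC_PR_RESERVE, &rsv))
			perror("IOC_PR_RESERVE");
		return 0;
	}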
 /*
  * Is it an unrecognized ioctl? The correct returns are either
  * ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a
@@ -295,89 +406,115 @@ static inline int is_unrecognized_ioctl(int ret)
                ret == -ENOIOCTLCMD;
 }
 
-/*
- * always keep this in sync with compat_blkdev_ioctl()
- */
-int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
-                       unsigned long arg)
+static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
+               unsigned cmd, unsigned long arg)
 {
-       struct gendisk *disk = bdev->bd_disk;
-       struct backing_dev_info *bdi;
-       loff_t size;
-       int ret, n;
-       unsigned int max_sectors;
+       int ret;
 
-       switch(cmd) {
-       case BLKFLSBUF:
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EACCES;
-
-               ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
-               if (!is_unrecognized_ioctl(ret))
-                       return ret;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
 
-               fsync_bdev(bdev);
-               invalidate_bdev(bdev);
-               return 0;
+       ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+       if (!is_unrecognized_ioctl(ret))
+               return ret;
 
-       case BLKROSET:
-               ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
-               if (!is_unrecognized_ioctl(ret))
-                       return ret;
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EACCES;
-               if (get_user(n, (int __user *)(arg)))
-                       return -EFAULT;
-               set_device_ro(bdev, n);
-               return 0;
+       fsync_bdev(bdev);
+       invalidate_bdev(bdev);
+       return 0;
+}
 
-       case BLKDISCARD:
-       case BLKSECDISCARD: {
-               uint64_t range[2];
+static int blkdev_roset(struct block_device *bdev, fmode_t mode,
+               unsigned cmd, unsigned long arg)
+{
+       int ret, n;
 
-               if (!(mode & FMODE_WRITE))
-                       return -EBADF;
+       ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+       if (!is_unrecognized_ioctl(ret))
+               return ret;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       if (get_user(n, (int __user *)arg))
+               return -EFAULT;
+       set_device_ro(bdev, n);
+       return 0;
+}
 
-               if (copy_from_user(range, (void __user *)arg, sizeof(range)))
-                       return -EFAULT;
+static int blkdev_getgeo(struct block_device *bdev,
+               struct hd_geometry __user *argp)
+{
+       struct gendisk *disk = bdev->bd_disk;
+       struct hd_geometry geo;
+       int ret;
 
-               return blk_ioctl_discard(bdev, range[0], range[1],
-                                        cmd == BLKSECDISCARD);
-       }
-       case BLKZEROOUT: {
-               uint64_t range[2];
+       if (!argp)
+               return -EINVAL;
+       if (!disk->fops->getgeo)
+               return -ENOTTY;
+
+       /*
+        * We need to set the startsect first; the driver may
+        * want to override it.
+        */
+       memset(&geo, 0, sizeof(geo));
+       geo.start = get_start_sect(bdev);
+       ret = disk->fops->getgeo(bdev, &geo);
+       if (ret)
+               return ret;
+       if (copy_to_user(argp, &geo, sizeof(geo)))
+               return -EFAULT;
+       return 0;
+}
 
-               if (!(mode & FMODE_WRITE))
-                       return -EBADF;
+/* set the logical block size */
+static int blkdev_bszset(struct block_device *bdev, fmode_t mode,
+               int __user *argp)
+{
+       int ret, n;
 
-               if (copy_from_user(range, (void __user *)arg, sizeof(range)))
-                       return -EFAULT;
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       if (!argp)
+               return -EINVAL;
+       if (get_user(n, argp))
+               return -EFAULT;
 
-               return blk_ioctl_zeroout(bdev, range[0], range[1]);
+       if (!(mode & FMODE_EXCL)) {
+               bdgrab(bdev);
+               if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+                       return -EBUSY;
        }
 
-       case HDIO_GETGEO: {
-               struct hd_geometry geo;
+       ret = set_blocksize(bdev, n);
+       if (!(mode & FMODE_EXCL))
+               blkdev_put(bdev, mode | FMODE_EXCL);
+       return ret;
+}
 
-               if (!arg)
-                       return -EINVAL;
-               if (!disk->fops->getgeo)
-                       return -ENOTTY;
-
-               /*
-                * We need to set the startsect first, the driver may
-                * want to override it.
-                */
-               memset(&geo, 0, sizeof(geo));
-               geo.start = get_start_sect(bdev);
-               ret = disk->fops->getgeo(bdev, &geo);
-               if (ret)
-                       return ret;
-               if (copy_to_user((struct hd_geometry __user *)arg, &geo,
-                                       sizeof(geo)))
-                       return -EFAULT;
-               return 0;
-       }
+/*
+ * always keep this in sync with compat_blkdev_ioctl()
+ */
+int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+                       unsigned long arg)
+{
+       struct backing_dev_info *bdi;
+       void __user *argp = (void __user *)arg;
+       loff_t size;
+       unsigned int max_sectors;
+
+       switch (cmd) {
+       case BLKFLSBUF:
+               return blkdev_flushbuf(bdev, mode, cmd, arg);
+       case BLKROSET:
+               return blkdev_roset(bdev, mode, cmd, arg);
+       case BLKDISCARD:
+               return blk_ioctl_discard(bdev, mode, arg, 0);
+       case BLKSECDISCARD:
+               return blk_ioctl_discard(bdev, mode, arg,
+                               BLKDEV_DISCARD_SECURE);
+       case BLKZEROOUT:
+               return blk_ioctl_zeroout(bdev, mode, arg);
+       case HDIO_GETGEO:
+               return blkdev_getgeo(bdev, argp);
        case BLKRAGET:
        case BLKFRAGET:
                if (!arg)
@@ -414,28 +551,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
                bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
                return 0;
        case BLKBSZSET:
-               /* set the logical block size */
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EACCES;
-               if (!arg)
-                       return -EINVAL;
-               if (get_user(n, (int __user *) arg))
-                       return -EFAULT;
-               if (!(mode & FMODE_EXCL)) {
-                       bdgrab(bdev);
-                       if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
-                               return -EBUSY;
-               }
-               ret = set_blocksize(bdev, n);
-               if (!(mode & FMODE_EXCL))
-                       blkdev_put(bdev, mode | FMODE_EXCL);
-               return ret;
+               return blkdev_bszset(bdev, mode, argp);
        case BLKPG:
-               ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
-               break;
+               return blkpg_ioctl(bdev, argp);
        case BLKRRPART:
-               ret = blkdev_reread_part(bdev);
-               break;
+               return blkdev_reread_part(bdev);
        case BLKGETSIZE:
                size = i_size_read(bdev->bd_inode);
                if ((size >> 9) > ~0UL)
@@ -447,11 +567,21 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        case BLKTRACESTOP:
        case BLKTRACESETUP:
        case BLKTRACETEARDOWN:
-               ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
-               break;
+               return blk_trace_ioctl(bdev, cmd, argp);
+       case IOC_PR_REGISTER:
+               return blkdev_pr_register(bdev, argp);
+       case IOC_PR_RESERVE:
+               return blkdev_pr_reserve(bdev, argp);
+       case IOC_PR_RELEASE:
+               return blkdev_pr_release(bdev, argp);
+       case IOC_PR_PREEMPT:
+               return blkdev_pr_preempt(bdev, argp, false);
+       case IOC_PR_PREEMPT_ABORT:
+               return blkdev_pr_preempt(bdev, argp, true);
+       case IOC_PR_CLEAR:
+               return blkdev_pr_clear(bdev, argp);
        default:
-               ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+               return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
        }
-       return ret;
 }
 EXPORT_SYMBOL_GPL(blkdev_ioctl);
index e7711133284e187dd4a0ed74a151b0880b342d33..3b030157ec85c45faedd520b6993cd440254d763 100644 (file)
@@ -428,6 +428,7 @@ rescan:
 
        if (disk->fops->revalidate_disk)
                disk->fops->revalidate_disk(disk);
+       blk_integrity_revalidate(disk);
        check_disk_size_change(disk, bdev);
        bdev->bd_invalidated = 0;
        if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
index 24d6e9715318e682e21607119358f9582038ceff..2c97912335a90944e04927eb7be8219f361e92e0 100644 (file)
@@ -160,38 +160,30 @@ static int t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
        return t10_pi_verify(iter, t10_pi_ip_fn, 3);
 }
 
-struct blk_integrity t10_pi_type1_crc = {
+struct blk_integrity_profile t10_pi_type1_crc = {
        .name                   = "T10-DIF-TYPE1-CRC",
        .generate_fn            = t10_pi_type1_generate_crc,
        .verify_fn              = t10_pi_type1_verify_crc,
-       .tuple_size             = sizeof(struct t10_pi_tuple),
-       .tag_size               = 0,
 };
 EXPORT_SYMBOL(t10_pi_type1_crc);
 
-struct blk_integrity t10_pi_type1_ip = {
+struct blk_integrity_profile t10_pi_type1_ip = {
        .name                   = "T10-DIF-TYPE1-IP",
        .generate_fn            = t10_pi_type1_generate_ip,
        .verify_fn              = t10_pi_type1_verify_ip,
-       .tuple_size             = sizeof(struct t10_pi_tuple),
-       .tag_size               = 0,
 };
 EXPORT_SYMBOL(t10_pi_type1_ip);
 
-struct blk_integrity t10_pi_type3_crc = {
+struct blk_integrity_profile t10_pi_type3_crc = {
        .name                   = "T10-DIF-TYPE3-CRC",
        .generate_fn            = t10_pi_type3_generate_crc,
        .verify_fn              = t10_pi_type3_verify_crc,
-       .tuple_size             = sizeof(struct t10_pi_tuple),
-       .tag_size               = 0,
 };
 EXPORT_SYMBOL(t10_pi_type3_crc);
 
-struct blk_integrity t10_pi_type3_ip = {
+struct blk_integrity_profile t10_pi_type3_ip = {
        .name                   = "T10-DIF-TYPE3-IP",
        .generate_fn            = t10_pi_type3_generate_ip,
        .verify_fn              = t10_pi_type3_verify_ip,
-       .tuple_size             = sizeof(struct t10_pi_tuple),
-       .tag_size               = 0,
 };
 EXPORT_SYMBOL(t10_pi_type3_ip);
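
These hunks strip the per-device fields (tuple_size, tag_size) out of the
shared T10 PI templates, which become pure blk_integrity_profile function
tables.  Sizing now comes from the registering driver; a sketch of the new
idiom, mirroring what the sd_dif.c hunk later in this diff does:

	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &t10_pi_type1_crc;			/* shared generate/verify ops */
	bi.tuple_size = sizeof(struct t10_pi_tuple);	/* per-device sizing */
	blk_integrity_register(disk, &bi);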
index f42f2bac646623fc1db767bae3a5fff0ecf98aac..4c55cfbad19e95df8cb67864d78af960c073b4df 100644 (file)
@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
 /* Calculate the length of a fixed format  */
 static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
 {
-       snprintf(buf, buf_size, "%x", max_val);
-       return strlen(buf);
+       return snprintf(NULL, 0, "%x", max_val);
 }
 
 static ssize_t regmap_name_read_file(struct file *file,
@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
                /* If we're in the region the user is trying to read */
                if (p >= *ppos) {
                        /* ...but not beyond it */
-                       if (buf_pos >= count - 1 - tot_len)
+                       if (buf_pos + tot_len + 1 >= count)
                                break;
 
                        /* Format the register */
index e76ed003769e1bc023e582df9f505c6ed9ca3fc2..061152a437300bbfacfbbc6e475c3935df7cecb6 100644 (file)
@@ -1014,15 +1014,16 @@ static int dm_table_build_index(struct dm_table *t)
        return r;
 }
 
+static bool integrity_profile_exists(struct gendisk *disk)
+{
+       return !!blk_get_integrity(disk);
+}
+
 /*
  * Get a disk whose integrity profile reflects the table's profile.
- * If %match_all is true, all devices' profiles must match.
- * If %match_all is false, all devices must at least have an
- * allocated integrity profile; but uninitialized is ok.
  * Returns NULL if integrity support was inconsistent or unavailable.
  */
-static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
-                                                   bool match_all)
+static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t)
 {
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
@@ -1030,10 +1031,8 @@ static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t,
 
        list_for_each_entry(dd, devices, list) {
                template_disk = dd->dm_dev->bdev->bd_disk;
-               if (!blk_get_integrity(template_disk))
+               if (!integrity_profile_exists(template_disk))
                        goto no_integrity;
-               if (!match_all && !blk_integrity_is_initialized(template_disk))
-                       continue; /* skip uninitialized profiles */
                else if (prev_disk &&
                         blk_integrity_compare(prev_disk, template_disk) < 0)
                        goto no_integrity;
@@ -1052,34 +1051,40 @@ no_integrity:
 }
 
 /*
- * Register the mapped device for blk_integrity support if
- * the underlying devices have an integrity profile.  But all devices
- * may not have matching profiles (checking all devices isn't reliable
+ * Register the mapped device for blk_integrity support if the
+ * underlying devices have an integrity profile.  But not all devices
+ * may have matching profiles (checking every device isn't reliable
  * during table load because this table may use other DM device(s) which
- * must be resumed before they will have an initialized integity profile).
- * Stacked DM devices force a 2 stage integrity profile validation:
- * 1 - during load, validate all initialized integrity profiles match
- * 2 - during resume, validate all integrity profiles match
+ * must be resumed before they will have an initialized integrity
+ * profile).  Consequently, stacked DM devices force a two-stage
+ * integrity profile validation: a first pass during table load and a
+ * final pass during resume.
  */
-static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device *md)
+static int dm_table_register_integrity(struct dm_table *t)
 {
+       struct mapped_device *md = t->md;
        struct gendisk *template_disk = NULL;
 
-       template_disk = dm_table_get_integrity_disk(t, false);
+       template_disk = dm_table_get_integrity_disk(t);
        if (!template_disk)
                return 0;
 
-       if (!blk_integrity_is_initialized(dm_disk(md))) {
+       if (!integrity_profile_exists(dm_disk(md))) {
                t->integrity_supported = 1;
-               return blk_integrity_register(dm_disk(md), NULL);
+               /*
+                * Register integrity profile during table load; we can do
+                * this because the final profile must match during resume.
+                */
+               blk_integrity_register(dm_disk(md),
+                                      blk_get_integrity(template_disk));
+               return 0;
        }
 
        /*
-        * If DM device already has an initalized integrity
+        * If DM device already has an initialized integrity
         * profile the new profile should not conflict.
         */
-       if (blk_integrity_is_initialized(template_disk) &&
-           blk_integrity_compare(dm_disk(md), template_disk) < 0) {
+       if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
                DMWARN("%s: conflict with existing integrity profile: "
                       "%s profile mismatch",
                       dm_device_name(t->md),
@@ -1087,7 +1092,7 @@ static int dm_table_prealloc_integrity(struct dm_table *t, struct mapped_device
                return 1;
        }
 
-       /* Preserve existing initialized integrity profile */
+       /* Preserve existing integrity profile */
        t->integrity_supported = 1;
        return 0;
 }
@@ -1112,7 +1117,7 @@ int dm_table_complete(struct dm_table *t)
                return r;
        }
 
-       r = dm_table_prealloc_integrity(t, t->md);
+       r = dm_table_register_integrity(t);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
@@ -1278,29 +1283,30 @@ combine_limits:
 }
 
 /*
- * Set the integrity profile for this device if all devices used have
- * matching profiles.  We're quite deep in the resume path but still
- * don't know if all devices (particularly DM devices this device
- * may be stacked on) have matching profiles.  Even if the profiles
- * don't match we have no way to fail (to resume) at this point.
+ * Verify that all devices have an integrity profile that matches the
+ * DM device's registered integrity profile.  If the profiles don't
+ * match then unregister the DM device's integrity profile.
  */
-static void dm_table_set_integrity(struct dm_table *t)
+static void dm_table_verify_integrity(struct dm_table *t)
 {
        struct gendisk *template_disk = NULL;
 
-       if (!blk_get_integrity(dm_disk(t->md)))
-               return;
+       if (t->integrity_supported) {
+               /*
+                * Verify that the original integrity profile
+                * matches all the devices in this table.
+                */
+               template_disk = dm_table_get_integrity_disk(t);
+               if (template_disk &&
+                   blk_integrity_compare(dm_disk(t->md), template_disk) >= 0)
+                       return;
+       }
 
-       template_disk = dm_table_get_integrity_disk(t, true);
-       if (template_disk)
-               blk_integrity_register(dm_disk(t->md),
-                                      blk_get_integrity(template_disk));
-       else if (blk_integrity_is_initialized(dm_disk(t->md)))
-               DMWARN("%s: device no longer has a valid integrity profile",
-                      dm_device_name(t->md));
-       else
+       if (integrity_profile_exists(dm_disk(t->md))) {
                DMWARN("%s: unable to establish an integrity profile",
                       dm_device_name(t->md));
+               blk_integrity_unregister(dm_disk(t->md));
+       }
 }
 
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@@ -1500,7 +1506,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
        else
                queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
 
-       dm_table_set_integrity(t);
+       dm_table_verify_integrity(t);
 
        /*
         * Determine whether or not this queue's I/O timings contribute
index 6264781dc69a6066b88d719537c471b7d1cd7b27..f4d953e10e2f03b858b38a91ed3a48142205a54f 100644 (file)
@@ -2233,8 +2233,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
                spin_lock(&_minor_lock);
                md->disk->private_data = NULL;
                spin_unlock(&_minor_lock);
-               if (blk_get_integrity(md->disk))
-                       blk_integrity_unregister(md->disk);
                del_gendisk(md->disk);
                put_disk(md->disk);
        }
index c702de18207ae76ab56f1235ed5c98a9095ed050..714aa92db174b9457f2011fd220327d5a99ee67d 100644 (file)
@@ -1962,12 +1962,9 @@ int md_integrity_register(struct mddev *mddev)
         * All component devices are integrity capable and have matching
         * profiles, register the common profile for the md device.
         */
-       if (blk_integrity_register(mddev->gendisk,
-                       bdev_get_integrity(reference->bdev)) != 0) {
-               printk(KERN_ERR "md: failed to register integrity for %s\n",
-                       mdname(mddev));
-               return -EINVAL;
-       }
+       blk_integrity_register(mddev->gendisk,
+                              bdev_get_integrity(reference->bdev));
+
        printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
        if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
                printk(KERN_ERR "md: failed to create integrity pool for %s\n",
@@ -1997,6 +1994,7 @@ void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
        if (bi_rdev && blk_integrity_compare(mddev->gendisk,
                                             rdev->bdev->bd_disk) >= 0)
                return;
+       WARN_ON_ONCE(!mddev->suspended);
        printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
        blk_integrity_unregister(mddev->gendisk);
 }
@@ -5542,7 +5540,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
                if (mddev->hold_active == UNTIL_STOP)
                        mddev->hold_active = 0;
        }
-       blk_integrity_unregister(disk);
        md_new_event(mddev);
        sysfs_notify_dirent_safe(mddev->sysfs_state);
        return 0;
index d132f06afdd1aa3140922f7965494087cf43eb7a..7331a80d89f1987a42d22a2ae7510210519e6bae 100644 (file)
@@ -264,7 +264,9 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        spin_unlock_irq(&conf->device_lock);
                        rcu_assign_pointer(p->rdev, rdev);
                        err = 0;
+                       mddev_suspend(mddev);
                        md_integrity_add_rdev(rdev, mddev);
+                       mddev_resume(mddev);
                        break;
                }
 
index 049df6c4a8cc302c9a266e34e5a1edefecb31cf7..a881b111fa35d126b93142cc70cccceec6661dbd 100644 (file)
@@ -1621,7 +1621,9 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        break;
                }
        }
+       mddev_suspend(mddev);
        md_integrity_add_rdev(rdev, mddev);
+       mddev_resume(mddev);
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
        print_conf(conf);
index 7c99a403771527354a5323137f5004d7adf9e007..6f0ec107996a063f0220e27a23daf77026dea0d9 100644 (file)
@@ -1736,7 +1736,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                rcu_assign_pointer(p->rdev, rdev);
                break;
        }
+       mddev_suspend(mddev);
        md_integrity_add_rdev(rdev, mddev);
+       mddev_resume(mddev);
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 
index 2426db88db36bf95f1f247eeae597ff69238c70d..f04445b992f512c537018b81bf0d685a3ee2f62b 100644 (file)
@@ -879,7 +879,7 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
                                      oob_chunk_size);
 
                /* the last chunk */
-               memcpy16_toio(&s[oob_chunk_size * sparebuf_size],
+               memcpy16_toio(&s[i * sparebuf_size],
                              &d[i * oob_chunk_size],
                              host->used_oobsize - i * oob_chunk_size);
        }
index f97a58d6aae1bbbacdb29ca86ac30c1f21d19e48..e7d333c162befd274f891b8674b5ca8fd905315e 100644 (file)
 #define NFC_ECC_MODE           GENMASK(15, 12)
 #define NFC_RANDOM_SEED                GENMASK(30, 16)
 
+/* NFC_USER_DATA helper macros */
+#define NFC_BUF_TO_USER_DATA(buf)      ((buf)[0] | ((buf)[1] << 8) | \
+                                       ((buf)[2] << 16) | ((buf)[3] << 24))
+
 #define NFC_DEFAULT_TIMEOUT_MS 1000
 
 #define NFC_SRAM_SIZE          1024
@@ -646,15 +650,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
                offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
 
                /* Fill OOB data in */
-               if (oob_required) {
-                       tmp = 0xffffffff;
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
-                                   4);
-               } else {
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
-                                   chip->oob_poi + offset - mtd->writesize,
-                                   4);
-               }
+               writel(NFC_BUF_TO_USER_DATA(chip->oob_poi +
+                                           layout->oobfree[i].offset),
+                      nfc->regs + NFC_REG_USER_DATA_BASE);
 
                chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
 
@@ -784,14 +782,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
                offset += ecc->size;
 
                /* Fill OOB data in */
-               if (oob_required) {
-                       tmp = 0xffffffff;
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
-                                   4);
-               } else {
-                       memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
-                                   4);
-               }
+               writel(NFC_BUF_TO_USER_DATA(oob),
+                      nfc->regs + NFC_REG_USER_DATA_BASE);
 
                tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
                      (1 << 30);
@@ -1389,6 +1381,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
                                        node);
                nand_release(&chip->mtd);
                sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+               list_del(&chip->node);
        }
 }
 
index 254239746020b5f0334b27fa7550a82ee3d9b97b..eae93ab8ffcded3060c072a5299909ebee89aa7d 100644 (file)
@@ -1279,7 +1279,6 @@ static int btt_blk_init(struct btt *btt)
 
 static void btt_blk_cleanup(struct btt *btt)
 {
-       blk_integrity_unregister(btt->btt_disk);
        del_gendisk(btt->btt_disk);
        put_disk(btt->btt_disk);
        blk_cleanup_queue(btt->btt_queue);
index cb62ec6a12d073cf1abd5422e151636d57d6f781..82c49bb870555fc6636ebc30106c396807f12d7b 100644 (file)
@@ -392,29 +392,18 @@ void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
 EXPORT_SYMBOL_GPL(nvdimm_bus_unregister);
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-static int nd_pi_nop_generate_verify(struct blk_integrity_iter *iter)
-{
-       return 0;
-}
-
 int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
 {
-       struct blk_integrity integrity = {
-               .name = "ND-PI-NOP",
-               .generate_fn = nd_pi_nop_generate_verify,
-               .verify_fn = nd_pi_nop_generate_verify,
-               .tuple_size = meta_size,
-               .tag_size = meta_size,
-       };
-       int ret;
+       struct blk_integrity bi;
 
        if (meta_size == 0)
                return 0;
 
-       ret = blk_integrity_register(disk, &integrity);
-       if (ret)
-               return ret;
+       /* zero-fill first: blk_integrity_register() also reads bi.flags */
+       memset(&bi, 0, sizeof(bi));
+       bi.profile = NULL;
+       bi.tuple_size = meta_size;
+       bi.tag_size = meta_size;
 
+       blk_integrity_register(disk, &bi);
        blk_queue_max_integrity_segments(disk->queue, 1);
 
        return 0;
index ff47a8b62f6881d14af9059190606e5754f2bea2..73b156fe0c2c6eeeff1a1c35dd6538a2a3e588ea 100644 (file)
 #include <linux/slab.h>
 #include <linux/t10-pi.h>
 #include <linux/types.h>
+#include <linux/pr.h>
 #include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include <asm/unaligned.h>
 
 #include <uapi/linux/nvme_ioctl.h>
 #include "nvme.h"
@@ -538,7 +540,7 @@ static void nvme_dif_remap(struct request *req,
        virt = bip_get_seed(bip);
        phys = nvme_block_nr(ns, blk_rq_pos(req));
        nlb = (blk_rq_bytes(req) >> ns->lba_shift);
-       ts = ns->disk->integrity->tuple_size;
+       ts = ns->disk->queue->integrity.tuple_size;
 
        for (i = 0; i < nlb; i++, virt++, phys++) {
                pi = (struct t10_pi_tuple *)p;
@@ -548,36 +550,20 @@ static void nvme_dif_remap(struct request *req,
        kunmap_atomic(pmap);
 }
 
-static int nvme_noop_verify(struct blk_integrity_iter *iter)
-{
-       return 0;
-}
-
-static int nvme_noop_generate(struct blk_integrity_iter *iter)
-{
-       return 0;
-}
-
-struct blk_integrity nvme_meta_noop = {
-       .name                   = "NVME_META_NOOP",
-       .generate_fn            = nvme_noop_generate,
-       .verify_fn              = nvme_noop_verify,
-};
-
 static void nvme_init_integrity(struct nvme_ns *ns)
 {
        struct blk_integrity integrity;
 
+       memset(&integrity, 0, sizeof(integrity));
+
        switch (ns->pi_type) {
        case NVME_NS_DPS_PI_TYPE3:
-               integrity = t10_pi_type3_crc;
+               integrity.profile = &t10_pi_type3_crc;
                break;
        case NVME_NS_DPS_PI_TYPE1:
        case NVME_NS_DPS_PI_TYPE2:
-               integrity = t10_pi_type1_crc;
+               integrity.profile = &t10_pi_type1_crc;
                break;
        default:
-               integrity = nvme_meta_noop;
+               integrity.profile = NULL;
                break;
        }
        integrity.tuple_size = ns->ms;
@@ -2048,6 +2034,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
        pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
                                        id->dps & NVME_NS_DPS_PI_MASK : 0;
 
+       blk_mq_freeze_queue(disk->queue);
        if (blk_get_integrity(disk) && (ns->pi_type != pi_type ||
                                ns->ms != old_ms ||
                                bs != queue_logical_block_size(disk->queue) ||
@@ -2057,8 +2044,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
        ns->pi_type = pi_type;
        blk_queue_logical_block_size(ns->queue, bs);
 
-       if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) &&
-                                                               !ns->ext)
+       if (ns->ms && !ns->ext)
                nvme_init_integrity(ns);
 
        if ((ns->ms && !(ns->ms == 8 && ns->pi_type) &&
@@ -2070,11 +2056,104 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 
        if (dev->oncs & NVME_CTRL_ONCS_DSM)
                nvme_config_discard(ns);
+       blk_mq_unfreeze_queue(disk->queue);
 
        kfree(id);
        return 0;
 }
 
+static char nvme_pr_type(enum pr_type type)
+{
+       switch (type) {
+       case PR_WRITE_EXCLUSIVE:
+               return 1;
+       case PR_EXCLUSIVE_ACCESS:
+               return 2;
+       case PR_WRITE_EXCLUSIVE_REG_ONLY:
+               return 3;
+       case PR_EXCLUSIVE_ACCESS_REG_ONLY:
+               return 4;
+       case PR_WRITE_EXCLUSIVE_ALL_REGS:
+               return 5;
+       case PR_EXCLUSIVE_ACCESS_ALL_REGS:
+               return 6;
+       default:
+               return 0;
+       }
+}
+
+static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
+                               u64 key, u64 sa_key, u8 op)
+{
+       struct nvme_ns *ns = bdev->bd_disk->private_data;
+       struct nvme_command c;
+       u8 data[16] = { 0, };
+
+       put_unaligned_le64(key, &data[0]);
+       put_unaligned_le64(sa_key, &data[8]);
+
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = op;
+       c.common.nsid = cpu_to_le32(ns->ns_id);
+       c.common.cdw10[0] = cpu_to_le32(cdw10);
+
+       return nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+}
+
+static int nvme_pr_register(struct block_device *bdev, u64 old,
+               u64 new, unsigned flags)
+{
+       u32 cdw10;
+
+       if (flags & ~PR_FL_IGNORE_KEY)
+               return -EOPNOTSUPP;
+
+       cdw10 = old ? 2 : 0;
+       cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
+       cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
+       return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
+}
+
+static int nvme_pr_reserve(struct block_device *bdev, u64 key,
+               enum pr_type type, unsigned flags)
+{
+       u32 cdw10;
+
+       if (flags & ~PR_FL_IGNORE_KEY)
+               return -EOPNOTSUPP;
+
+       cdw10 = nvme_pr_type(type) << 8;
+       cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
+       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
+}
+
+static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
+               enum pr_type type, bool abort)
+{
+       u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1);
+       return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
+}
+
+static int nvme_pr_clear(struct block_device *bdev, u64 key)
+{
+       u32 cdw10 = 1 | (key ? 0 : 1 << 3);	/* RRELA=1 (clear); IEKEY if no key */
+       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+}
+
+static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+       u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 0 : 1 << 3);
+       return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+}
+
+static const struct pr_ops nvme_pr_ops = {
+       .pr_register    = nvme_pr_register,
+       .pr_reserve     = nvme_pr_reserve,
+       .pr_release     = nvme_pr_release,
+       .pr_preempt     = nvme_pr_preempt,
+       .pr_clear       = nvme_pr_clear,
+};
+
 static const struct block_device_operations nvme_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = nvme_ioctl,
@@ -2083,6 +2162,7 @@ static const struct block_device_operations nvme_fops = {
        .release        = nvme_release,
        .getgeo         = nvme_getgeo,
        .revalidate_disk= nvme_revalidate_disk,
+       .pr_ops         = &nvme_pr_ops,
 };
 
 static int nvme_kthread(void *data)
@@ -2425,11 +2505,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
        if (kill)
                blk_set_queue_dying(ns->queue);
-       if (ns->disk->flags & GENHD_FL_UP) {
-               if (blk_get_integrity(ns->disk))
-                       blk_integrity_unregister(ns->disk);
+       if (ns->disk->flags & GENHD_FL_UP)
                del_gendisk(ns->disk);
-       }
        if (kill || !blk_queue_dying(ns->queue)) {
                blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
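
For reference, the cdw10 values built by the nvme_pr_* helpers above follow
the NVMe reservation command layout, stated here as a reading of the code
rather than of the spec: bits 2:0 carry the action code, bit 3 is IEKEY
(ignore existing key), bits 15:8 the reservation type from nvme_pr_type(),
and bits 31:30 CPTPL on register.  A worked example:

	/* preempt-and-abort of a write-exclusive reservation:
	 *   cdw10 = (1 << 8) | 2;	RTYPE = 1 (write exclusive), RACQA = 2
	 */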
index 01bf3476a79183714f62f67efcf5d8b17b70d497..a9567af7cec02c5a13102be118010e7bb7b1c888 100644 (file)
@@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = {
        AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
                 AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
        AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
-                AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
+                AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
        AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
-                AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
+                AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
        /* secondary switchable output of DCDC1 */
        AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100,
                    AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
index 7849187d91aea909fdd9d0ce5bbabb35fc2e5736..8a34f6acc801531ce8eb16882fed2b04ed4c874c 100644 (file)
@@ -1403,6 +1403,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
                        return 0;
                }
 
+               /* Did the lookup explicitly defer for us? */
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+
                if (have_full_constraints()) {
                        r = dummy_regulator_rdev;
                } else {
index 3f370228bf310a223eaee279c4839a5c9602410f..5e170a6809fde2fe3c8c6ff51d382c2a570485ea 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/async.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include <linux/pr.h>
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 
@@ -1535,6 +1536,100 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
 }
 #endif
 
+static char sd_pr_type(enum pr_type type)
+{
+       switch (type) {
+       case PR_WRITE_EXCLUSIVE:
+               return 0x01;
+       case PR_EXCLUSIVE_ACCESS:
+               return 0x03;
+       case PR_WRITE_EXCLUSIVE_REG_ONLY:
+               return 0x05;
+       case PR_EXCLUSIVE_ACCESS_REG_ONLY:
+               return 0x06;
+       case PR_WRITE_EXCLUSIVE_ALL_REGS:
+               return 0x07;
+       case PR_EXCLUSIVE_ACCESS_ALL_REGS:
+               return 0x08;
+       default:
+               return 0;
+       }
+}
+
+static int sd_pr_command(struct block_device *bdev, u8 sa,
+               u64 key, u64 sa_key, u8 type, u8 flags)
+{
+       struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+       struct scsi_sense_hdr sshdr;
+       int result;
+       u8 cmd[16] = { 0, };
+       u8 data[24] = { 0, };
+
+       cmd[0] = PERSISTENT_RESERVE_OUT;
+       cmd[1] = sa;
+       cmd[2] = type;
+       put_unaligned_be32(sizeof(data), &cmd[5]);
+
+       put_unaligned_be64(key, &data[0]);
+       put_unaligned_be64(sa_key, &data[8]);
+       data[20] = flags;
+
+       result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
+                       &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+
+       if ((driver_byte(result) & DRIVER_SENSE) &&
+           (scsi_sense_valid(&sshdr))) {
+               sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
+               scsi_print_sense_hdr(sdev, NULL, &sshdr);
+       }
+
+       return result;
+}
+
+static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
+               u32 flags)
+{
+       if (flags & ~PR_FL_IGNORE_KEY)
+               return -EOPNOTSUPP;
+       return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
+                       old_key, new_key, 0,
+                       (1 << 0) /* APTPL */ |
+                       (1 << 2) /* ALL_TG_PT */);
+}
+
+static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
+               u32 flags)
+{
+       if (flags)
+               return -EOPNOTSUPP;
+       return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
+}
+
+static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
+{
+       return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
+}
+
+static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
+               enum pr_type type, bool abort)
+{
+       return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
+                            sd_pr_type(type), 0);
+}
+
+static int sd_pr_clear(struct block_device *bdev, u64 key)
+{
+       return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
+}
+
+static const struct pr_ops sd_pr_ops = {
+       .pr_register    = sd_pr_register,
+       .pr_reserve     = sd_pr_reserve,
+       .pr_release     = sd_pr_release,
+       .pr_preempt     = sd_pr_preempt,
+       .pr_clear       = sd_pr_clear,
+};
+
 static const struct block_device_operations sd_fops = {
        .owner                  = THIS_MODULE,
        .open                   = sd_open,
@@ -1547,6 +1642,7 @@ static const struct block_device_operations sd_fops = {
        .check_events           = sd_check_events,
        .revalidate_disk        = sd_revalidate_disk,
        .unlock_native_capacity = sd_unlock_native_capacity,
+       .pr_ops                 = &sd_pr_ops,
 };
 
 /**
@@ -3068,7 +3164,6 @@ static void scsi_disk_release(struct device *dev)
        ida_remove(&sd_index_ida, sdkp->index);
        spin_unlock(&sd_index_lock);
 
-       blk_integrity_unregister(disk);
        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);
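
As an aside on what sd_pr_command() above assembles, the byte layout below is
read off the code (and, hedged, off SPC-4; it is not a spec quotation):

	/* CDB:  cmd[0]    = PERSISTENT RESERVE OUT (0x5f)
	 *       cmd[1]    = service action, 0x00 REGISTER .. 0x05 PREEMPT AND ABORT
	 *       cmd[2]    = scope (high nibble, 0 = LU) | type from sd_pr_type()
	 *       cmd[5..8] = parameter list length (24), big endian
	 * data: [0..7]    = reservation key, big endian
	 *       [8..15]   = service action (new) key, big endian
	 *       [20]      = flags: APTPL (bit 0), ALL_TG_PT (bit 2)
	 */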
index 5c06d292b94c704a5dd96f8549aa732baf8b69c9..987bf392c336181036f19debf282be2c2f9d1ffa 100644 (file)
@@ -43,6 +43,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
        struct scsi_device *sdp = sdkp->device;
        struct gendisk *disk = sdkp->disk;
        u8 type = sdkp->protection_type;
+       struct blk_integrity bi;
        int dif, dix;
 
        dif = scsi_host_dif_capable(sdp->host, type);
@@ -55,39 +56,43 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
        if (!dix)
                return;
 
+       memset(&bi, 0, sizeof(bi));
+
        /* Enable DMA of protection information */
        if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
                if (type == SD_DIF_TYPE3_PROTECTION)
-                       blk_integrity_register(disk, &t10_pi_type3_ip);
+                       bi.profile = &t10_pi_type3_ip;
                else
-                       blk_integrity_register(disk, &t10_pi_type1_ip);
+                       bi.profile = &t10_pi_type1_ip;
 
-               disk->integrity->flags |= BLK_INTEGRITY_IP_CHECKSUM;
+               bi.flags |= BLK_INTEGRITY_IP_CHECKSUM;
        } else
                if (type == SD_DIF_TYPE3_PROTECTION)
-                       blk_integrity_register(disk, &t10_pi_type3_crc);
+                       bi.profile = &t10_pi_type3_crc;
                else
-                       blk_integrity_register(disk, &t10_pi_type1_crc);
+                       bi.profile = &t10_pi_type1_crc;
 
+       bi.tuple_size = sizeof(struct t10_pi_tuple);
        sd_printk(KERN_NOTICE, sdkp,
-                 "Enabling DIX %s protection\n", disk->integrity->name);
+                 "Enabling DIX %s protection\n", bi.profile->name);
 
-       /* Signal to block layer that we support sector tagging */
        if (dif && type) {
-
-               disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
+               bi.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
 
                if (!sdkp->ATO)
-                       return;
+                       goto out;
 
                if (type == SD_DIF_TYPE3_PROTECTION)
-                       disk->integrity->tag_size = sizeof(u16) + sizeof(u32);
+                       bi.tag_size = sizeof(u16) + sizeof(u32);
                else
-                       disk->integrity->tag_size = sizeof(u16);
+                       bi.tag_size = sizeof(u16);
 
                sd_printk(KERN_NOTICE, sdkp, "DIF application tag size %u\n",
-                         disk->integrity->tag_size);
+                         bi.tag_size);
        }
+
+out:
+       blk_integrity_register(disk, &bi);
 }
 
 /*
index 3cf9faa6cc3fe871174ec1b2777472b0ac4c6883..a85d863d4a442f2f30633db5de0ff469ee9c6348 100644 (file)
@@ -992,11 +992,12 @@ static int davinci_spi_probe(struct platform_device *pdev)
                goto free_master;
        }
 
-       dspi->irq = platform_get_irq(pdev, 0);
-       if (dspi->irq <= 0) {
+       ret = platform_get_irq(pdev, 0);
+       if (ret == 0)
                ret = -EINVAL;
+       if (ret < 0)
                goto free_master;
-       }
+       dspi->irq = ret;
 
        ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
                                dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
index 0f19e11acac2197806eba0a42290b5067b560d73..f29c69120054463eee986c0ab6ed48ec6c504c39 100644 (file)
@@ -155,17 +155,17 @@ static int iblock_configure_device(struct se_device *dev)
        if (bi) {
                struct bio_set *bs = ib_dev->ibd_bio_set;
 
-               if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
-                   !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
+               if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
+                   !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
                        pr_err("IBLOCK export of blk_integrity: %s not"
-                              " supported\n", bi->name);
+                              " supported\n", bi->profile->name);
                        ret = -ENOSYS;
                        goto out_blkdev_put;
                }
 
-               if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
+               if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
-               } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
+               } else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
                        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
                }
 
index 0e5fde1d3ffbe5a152035f33063afa98bf84f33e..9f9a7bef1ff6d46d80fe8cb6dcfeea5a3e26729d 100644 (file)
@@ -752,7 +752,7 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
        if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) {
                dev_err(dev, "Invalid waveform\n");
                err = -EINVAL;
-               goto err_failed;
+               goto err_fw;
        }
 
        mutex_lock(&(par->io_lock));
@@ -762,13 +762,15 @@ static ssize_t broadsheet_loadstore_waveform(struct device *dev,
        mutex_unlock(&(par->io_lock));
        if (err < 0) {
                dev_err(dev, "Failed to store broadsheet waveform\n");
-               goto err_failed;
+               goto err_fw;
        }
 
        dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size);
 
-       return len;
+       err = len;
 
+err_fw:
+       release_firmware(fw_entry);
 err_failed:
        return err;
 }
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index 7fa2e6f9e322d1e2223116474800b515684abfc2..b335c1ae8625106efff818d696ebad532ade7f17 100644 (file)
@@ -1628,9 +1628,16 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
 static int fsl_diu_resume(struct platform_device *ofdev)
 {
        struct fsl_diu_data *data;
+       unsigned int i;
 
        data = dev_get_drvdata(&ofdev->dev);
-       enable_lcdc(data->fsl_diu_info);
+
+       fsl_diu_enable_interrupts(data);
+       update_lcdc(data->fsl_diu_info);
+       for (i = 0; i < NUM_AOIS; i++) {
+               if (data->mfb[i].count)
+                       fsl_diu_enable_panel(&data->fsl_diu_info[i]);
+       }
 
        return 0;
 }
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 9b8bebdf8f86e1209f0ca2f6f9779e8c64fa2e43..f9ec5c0484fabbd8d6f2cc5b5e5897c003e07b10 100644 (file)
@@ -831,6 +831,7 @@ static struct of_device_id of_platform_mb862xx_tbl[] = {
        { .compatible = "fujitsu,coral", },
        { /* end */ }
 };
+MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl);
 
 static struct platform_driver of_platform_mb862xxfb_driver = {
        .driver = {
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
index a8ce920fa797d335d2dbfbbc1c9d8f93a4378959..d811e6dcaef727588cdc65695673a4f7144f0f30 100644 (file)
@@ -294,7 +294,7 @@ static int dvic_probe_of(struct platform_device *pdev)
 
        adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
        if (adapter_node) {
-               adapter = of_find_i2c_adapter_by_node(adapter_node);
+               adapter = of_get_i2c_adapter_by_node(adapter_node);
                if (adapter == NULL) {
                        dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
                        omap_dss_put_device(ddata->in);
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
index 90cbc4c3406c719909f3495cb97533face292d3c..c581231c74a53bb837dcc24da190202ed56cb648 100644 (file)
@@ -898,6 +898,7 @@ static const struct of_device_id acx565akm_of_match[] = {
        { .compatible = "omapdss,sony,acx565akm", },
        {},
 };
+MODULE_DEVICE_TABLE(of, acx565akm_of_match);
 
 static struct spi_driver acx565akm_driver = {
        .driver = {
diff --git a/drivers/video/fbdev/tridentfb.c b/drivers/video/fbdev/tridentfb.c
index 7ed9a227f5eaf006ed5c2a9759ee9db299d114e3..01b43e9ce941acb8751c0c2e8294e19db7ce927c 100644 (file)
@@ -226,7 +226,7 @@ static void blade_image_blit(struct tridentfb_par *par, const char *data,
        writemmr(par, DST1, point(x, y));
        writemmr(par, DST2, point(x + w - 1, y + h - 1));
 
-       memcpy(par->io_virt + 0x10000, data, 4 * size);
+       iowrite32_rep(par->io_virt + 0x10000, data, size);
 }
 
 static void blade_copy_rect(struct tridentfb_par *par,
@@ -673,8 +673,14 @@ static int get_nativex(struct tridentfb_par *par)
 static inline void set_lwidth(struct tridentfb_par *par, int width)
 {
        write3X4(par, VGA_CRTC_OFFSET, width & 0xFF);
-       write3X4(par, AddColReg,
-                (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
+       /* chips older than TGUI9660 have only 1 width bit in AddColReg */
+       /* touching the other one breaks I2C/DDC */
+       if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320)
+               write3X4(par, AddColReg,
+                    (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4));
+       else
+               write3X4(par, AddColReg,
+                    (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4));
 }
 
 /* For resolutions smaller than FP resolution stretch */
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
index 32d8275e4c88485b2b522f56733e90ba614fc7b2..8a1076beecd33aa29891849f5feaa36b42027036 100644 (file)
@@ -210,6 +210,7 @@ struct display_timings *of_get_display_timings(struct device_node *np)
                         */
                        pr_err("%s: error in timing %d\n",
                                of_node_full_name(np), disp->num_timings + 1);
+                       kfree(dt);
                        goto timingfail;
                }
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 073bb57adab10ce14e55205eddf0e3de5862be1d..0a793c7930eba290ab50555ea3def8fd1d99203b 100644 (file)
@@ -1075,7 +1075,7 @@ int revalidate_disk(struct gendisk *disk)
 
        if (disk->fops->revalidate_disk)
                ret = disk->fops->revalidate_disk(disk);
-
+       blk_integrity_revalidate(disk);
        bdev = bdget_disk(disk, 0);
        if (!bdev)
                return ret;
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 27aea110e92365e1e91610579369215cc54644ea..c3cc1609025fa3a966c2d5b10f32626214a9e4ef 100644 (file)
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.07"
+#define CIFS_VERSION   "2.08"
 #endif                         /* _CIFSFS_H */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f621b44cb8009fe87bf631e0a96c941fe63d3408..6b66dd5d15408676ab6510f7ce415164fe5c0571 100644 (file)
@@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
        struct tcon_link *tlink = NULL;
        struct cifs_tcon *tcon = NULL;
        struct TCP_Server_Info *server;
-       struct cifs_io_parms io_parms;
 
        /*
         * To avoid spurious oplock breaks from server, in the case of
@@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
                        rc = -ENOSYS;
                cifsFileInfo_put(open_file);
                cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
-               if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
-                       unsigned int bytes_written;
-
-                       io_parms.netfid = open_file->fid.netfid;
-                       io_parms.pid = open_file->pid;
-                       io_parms.tcon = tcon;
-                       io_parms.offset = 0;
-                       io_parms.length = attrs->ia_size;
-                       rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
-                                         NULL, NULL, 1);
-                       cifs_dbg(FYI, "Wrt seteof rc %d\n", rc);
-               }
        } else
                rc = -EINVAL;
 
@@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
        else
                rc = -ENOSYS;
        cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
-       if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
-               __u16 netfid;
-               int oplock = 0;
 
-               rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN,
-                                  GENERIC_WRITE, CREATE_NOT_DIR, &netfid,
-                                  &oplock, NULL, cifs_sb->local_nls,
-                                  cifs_remap(cifs_sb));
-               if (rc == 0) {
-                       unsigned int bytes_written;
-
-                       io_parms.netfid = netfid;
-                       io_parms.pid = current->tgid;
-                       io_parms.tcon = tcon;
-                       io_parms.offset = 0;
-                       io_parms.length = attrs->ia_size;
-                       rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL,
-                                         NULL,  1);
-                       cifs_dbg(FYI, "wrt seteof rc %d\n", rc);
-                       CIFSSMBClose(xid, tcon, netfid);
-               }
-       }
        if (tlink)
                cifs_put_tlink(tlink);
 
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index ce83e2edbe0a22ae9858ec5a04caa4e2b6ad59d2..597a417ba94d3bb910f52e3f14119a197ff2d090 100644 (file)
@@ -922,7 +922,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        if (tcon && tcon->bad_network_name)
                return -ENOENT;
 
-       if ((tcon->seal) &&
+       if ((tcon && tcon->seal) &&
            ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
                cifs_dbg(VFS, "encryption requested but no server support");
                return -EOPNOTSUPP;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f93b9cdb4934d17739bf4c6442d79bbfe32dcf13..5133bb18830e8c8b97e68e8f2c55d617ff92a321 100644 (file)
@@ -1458,12 +1458,18 @@ nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
        if (delegation)
                delegation_flags = delegation->flags;
        rcu_read_unlock();
-       if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+       switch (data->o_arg.claim) {
+       default:
+               break;
+       case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+       case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
                pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
                                   "returning a delegation for "
                                   "OPEN(CLAIM_DELEGATE_CUR)\n",
                                   clp->cl_hostname);
-       } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+               return;
+       }
+       if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
                nfs_inode_set_delegation(state->inode,
                                         data->owner->so_cred,
                                         &data->o_res);
@@ -1771,6 +1777,9 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
        if (IS_ERR(opendata))
                return PTR_ERR(opendata);
        nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
+       write_seqlock(&state->seqlock);
+       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+       write_sequnlock(&state->seqlock);
        clear_bit(NFS_DELEGATED_STATE, &state->flags);
        switch (type & (FMODE_READ|FMODE_WRITE)) {
        case FMODE_READ|FMODE_WRITE:
@@ -1863,6 +1872,8 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
        data->rpc_done = 0;
        data->rpc_status = 0;
        data->timestamp = jiffies;
+       if (data->is_recover)
+               nfs4_set_sequence_privileged(&data->c_arg.seq_args);
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5db324635e920a51923b37c3d22c9d3dee2f6682..d854693a15b0e2443779986552d29d9db3f6cdc2 100644 (file)
@@ -1725,7 +1725,8 @@ restart:
                        if (!test_and_clear_bit(ops->owner_flag_bit,
                                                        &sp->so_flags))
                                continue;
-                       atomic_inc(&sp->so_count);
+                       if (!atomic_inc_not_zero(&sp->so_count))
+                               continue;
                        spin_unlock(&clp->cl_lock);
                        rcu_read_unlock();
 
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 28df12e525bac5857c0d41aba62d558db82f526a..671cf68fe56bed7a457fddd4ccdd5913509ff1bd 100644 (file)
@@ -409,7 +409,7 @@ DECLARE_EVENT_CLASS(nfs4_open_event,
                        __entry->flags = flags;
                        __entry->fmode = (__force unsigned int)ctx->mode;
                        __entry->dev = ctx->dentry->d_sb->s_dev;
-                       if (!IS_ERR(state))
+                       if (!IS_ERR_OR_NULL(state))
                                inode = state->inode;
                        if (inode != NULL) {
                                __entry->fileid = NFS_FILEID(inode);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 72624dc4a623b894ca0be949c5feab1cec455e02..75ab7622e0cc193bab28f2ba5bb56d37e5f49465 100644 (file)
@@ -569,19 +569,17 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
        if (!nfs_pageio_add_request(pgio, req)) {
                nfs_redirty_request(req);
                ret = pgio->pg_error;
-       }
+       } else
+               nfs_add_stats(page_file_mapping(page)->host,
+                               NFSIOS_WRITEPAGES, 1);
 out:
        return ret;
 }
 
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
-       struct inode *inode = page_file_mapping(page)->host;
        int ret;
 
-       nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
-       nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
-
        nfs_pageio_cond_complete(pgio, page_file_index(page));
        ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
        if (ret == -EAGAIN) {
@@ -597,9 +595,11 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
 {
        struct nfs_pageio_descriptor pgio;
+       struct inode *inode = page_file_mapping(page)->host;
        int err;
 
-       nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
+       nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+       nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
                                false, &nfs_async_write_completion_ops);
        err = nfs_do_writepage(page, wbc, &pgio);
        nfs_pageio_complete(&pgio);
@@ -1223,7 +1223,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
                return 1;
        if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
                       list_empty_careful(&flctx->flc_posix)))
-               return 0;
+               return 1;
 
        /* Check to see if there are whole file write locks */
        ret = 0;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 5e7d43ab61c000d894164e093132f607344e9cc0..83cc9d4e545518e3cff6ff7d38067102e9398cf8 100644 (file)
@@ -166,7 +166,6 @@ enum {
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                                                  struct request_queue *q);
-void blk_mq_finish_init(struct request_queue *q);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 19c2e947d4d127364887a133d4b0d0ce92090e1c..d045ca8487af17eb2aee07a8aed267f5b74e1b83 100644 (file)
@@ -35,6 +35,7 @@ struct sg_io_hdr;
 struct bsg_job;
 struct blkcg_gq;
 struct blk_flush_queue;
+struct pr_ops;
 
 #define BLKDEV_MIN_RQ  4
 #define BLKDEV_MAX_RQ  128     /* Default maximum */
@@ -369,6 +370,10 @@ struct request_queue {
         */
        struct kobject mq_kobj;
 
+#ifdef  CONFIG_BLK_DEV_INTEGRITY
+       struct blk_integrity integrity;
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 #ifdef CONFIG_PM
        struct device           *dev;
        int                     rpm_status;
@@ -450,7 +455,7 @@ struct request_queue {
 #endif
        struct rcu_head         rcu_head;
        wait_queue_head_t       mq_freeze_wq;
-       struct percpu_ref       mq_usage_counter;
+       struct percpu_ref       q_usage_counter;
        struct list_head        all_q_node;
 
        struct blk_mq_tag_set   *tag_set;
@@ -1462,22 +1467,13 @@ struct blk_integrity_iter {
 
 typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
 
-struct blk_integrity {
-       integrity_processing_fn *generate_fn;
-       integrity_processing_fn *verify_fn;
-
-       unsigned short          flags;
-       unsigned short          tuple_size;
-       unsigned short          interval;
-       unsigned short          tag_size;
-
-       const char              *name;
-
-       struct kobject          kobj;
+struct blk_integrity_profile {
+       integrity_processing_fn         *generate_fn;
+       integrity_processing_fn         *verify_fn;
+       const char                      *name;
 };
 
-extern bool blk_integrity_is_initialized(struct gendisk *);
-extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
@@ -1488,15 +1484,20 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
                                    struct bio *);
 
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 {
-       return bdev->bd_disk->integrity;
+       struct blk_integrity *bi = &disk->queue->integrity;
+
+       if (!bi->profile)
+               return NULL;
+
+       return bi;
 }
 
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
 {
-       return disk->integrity;
+       return blk_get_integrity(bdev->bd_disk);
 }
 
 static inline bool blk_integrity_rq(struct request *rq)
@@ -1570,10 +1571,9 @@ static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
 {
        return 0;
 }
-static inline int blk_integrity_register(struct gendisk *d,
+static inline void blk_integrity_register(struct gendisk *d,
                                         struct blk_integrity *b)
 {
-       return 0;
 }
 static inline void blk_integrity_unregister(struct gendisk *d)
 {
@@ -1598,10 +1598,7 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
 {
        return true;
 }
-static inline bool blk_integrity_is_initialized(struct gendisk *g)
-{
-       return 0;
-}
+
 static inline bool integrity_req_gap_back_merge(struct request *req,
                                                struct bio *next)
 {
@@ -1633,6 +1630,7 @@ struct block_device_operations {
        /* this callback is with swap_lock and sometimes page table lock held */
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
        struct module *owner;
+       const struct pr_ops *pr_ops;
 };
 
 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
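
With the integrity payload now embedded in the request queue, blk_get_integrity() returns NULL until a profile has been registered, so it doubles as a capability test. A small hypothetical consumer sketch; disk_has_pi is an invented name, not part of this merge:

#include <linux/blkdev.h>

static bool disk_has_pi(struct gendisk *disk)
{
	struct blk_integrity *bi = blk_get_integrity(disk);

	/* bi is NULL until blk_integrity_register() sets a profile */
	return bi && (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE);
}
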
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 2adbfa6d02bc4b10ecee8af9c641f177d184157a..847cc1d916348386379c510ee38433e42d4b93d2 100644 (file)
@@ -163,6 +163,18 @@ struct disk_part_tbl {
 
 struct disk_events;
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+struct blk_integrity {
+       struct blk_integrity_profile    *profile;
+       unsigned char                   flags;
+       unsigned char                   tuple_size;
+       unsigned char                   interval_exp;
+       unsigned char                   tag_size;
+};
+
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 struct gendisk {
        /* major, first_minor and minors are input parameters only,
         * don't use directly.  Use disk_devt() and disk_max_parts().
@@ -198,8 +210,8 @@ struct gendisk {
        atomic_t sync_io;               /* RAID */
        struct disk_events *ev;
 #ifdef  CONFIG_BLK_DEV_INTEGRITY
-       struct blk_integrity *integrity;
-#endif
+       struct kobject integrity_kobj;
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
        int node_id;
 };
 
@@ -727,6 +739,16 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
 #endif
 }
 
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+extern void blk_integrity_add(struct gendisk *);
+extern void blk_integrity_del(struct gendisk *);
+extern void blk_integrity_revalidate(struct gendisk *);
+#else  /* CONFIG_BLK_DEV_INTEGRITY */
+static inline void blk_integrity_add(struct gendisk *disk) { }
+static inline void blk_integrity_del(struct gendisk *disk) { }
+static inline void blk_integrity_revalidate(struct gendisk *disk) { }
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
+
 #else /* CONFIG_BLOCK */
 
 static inline void printk_all_partitions(void) { }
diff --git a/include/linux/pr.h b/include/linux/pr.h
new file mode 100644 (file)
index 0000000..65c01c1
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef LINUX_PR_H
+#define LINUX_PR_H
+
+#include <uapi/linux/pr.h>
+
+struct pr_ops {
+       int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key,
+                       u32 flags);
+       int (*pr_reserve)(struct block_device *bdev, u64 key,
+                       enum pr_type type, u32 flags);
+       int (*pr_release)(struct block_device *bdev, u64 key,
+                       enum pr_type type);
+       int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key,
+                       enum pr_type type, bool abort);
+       int (*pr_clear)(struct block_device *bdev, u64 key);
+};
+
+#endif /* LINUX_PR_H */
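
A pr_ops table is wired up through the new ->pr_ops member of block_device_operations (see the blkdev.h hunk above). A hypothetical driver-side sketch; the mydrv_* names are invented for illustration:

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/pr.h>

static int mydrv_pr_register(struct block_device *bdev, u64 old_key,
			     u64 new_key, u32 flags)
{
	/* a real driver would issue its device's own persistent
	 * reservation command here */
	return -EOPNOTSUPP;
}

static const struct pr_ops mydrv_pr_ops = {
	.pr_register	= mydrv_pr_register,
};

static const struct block_device_operations mydrv_fops = {
	.owner	= THIS_MODULE,
	.pr_ops	= &mydrv_pr_ops,
};
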
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 6a8b9942632dccaf6d8ef508d7e31867ac041237..dd8de82cf5b575c8aac692595068264ef0b3dce4 100644 (file)
@@ -14,9 +14,9 @@ struct t10_pi_tuple {
 };
 
 
-extern struct blk_integrity t10_pi_type1_crc;
-extern struct blk_integrity t10_pi_type1_ip;
-extern struct blk_integrity t10_pi_type3_crc;
-extern struct blk_integrity t10_pi_type3_ip;
+extern struct blk_integrity_profile t10_pi_type1_crc;
+extern struct blk_integrity_profile t10_pi_type1_ip;
+extern struct blk_integrity_profile t10_pi_type3_crc;
+extern struct blk_integrity_profile t10_pi_type3_ip;
 
 #endif
diff --git a/include/uapi/linux/pr.h b/include/uapi/linux/pr.h
new file mode 100644 (file)
index 0000000..57d7c0f
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef _UAPI_PR_H
+#define _UAPI_PR_H
+
+enum pr_type {
+       PR_WRITE_EXCLUSIVE              = 1,
+       PR_EXCLUSIVE_ACCESS             = 2,
+       PR_WRITE_EXCLUSIVE_REG_ONLY     = 3,
+       PR_EXCLUSIVE_ACCESS_REG_ONLY    = 4,
+       PR_WRITE_EXCLUSIVE_ALL_REGS     = 5,
+       PR_EXCLUSIVE_ACCESS_ALL_REGS    = 6,
+};
+
+struct pr_reservation {
+       __u64   key;
+       __u32   type;
+       __u32   flags;
+};
+
+struct pr_registration {
+       __u64   old_key;
+       __u64   new_key;
+       __u32   flags;
+       __u32   __pad;
+};
+
+struct pr_preempt {
+       __u64   old_key;
+       __u64   new_key;
+       __u32   type;
+       __u32   flags;
+};
+
+struct pr_clear {
+       __u64   key;
+       __u32   flags;
+       __u32   __pad;
+};
+
+#define PR_FL_IGNORE_KEY       (1 << 0)        /* ignore existing key */
+
+#define IOC_PR_REGISTER                _IOW('p', 200, struct pr_registration)
+#define IOC_PR_RESERVE         _IOW('p', 201, struct pr_reservation)
+#define IOC_PR_RELEASE         _IOW('p', 202, struct pr_reservation)
+#define IOC_PR_PREEMPT         _IOW('p', 203, struct pr_preempt)
+#define IOC_PR_PREEMPT_ABORT   _IOW('p', 204, struct pr_preempt)
+#define IOC_PR_CLEAR           _IOW('p', 205, struct pr_clear)
+
+#endif /* _UAPI_PR_H */
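
From user space the same operations are reached through the new ioctls. A hypothetical sketch, assuming a device node (the placeholder /dev/sdX) whose driver implements pr_ops:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_registration reg = { .old_key = 0, .new_key = 0x123abc };
	struct pr_reservation rsv = { .key = 0x123abc,
				      .type = PR_WRITE_EXCLUSIVE };
	int fd = open("/dev/sdX", O_RDWR);	/* placeholder device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IOC_PR_REGISTER, &reg) < 0)	/* register our key */
		perror("IOC_PR_REGISTER");
	else if (ioctl(fd, IOC_PR_RESERVE, &rsv) < 0)	/* then reserve */
		perror("IOC_PR_RESERVE");
	return 0;
}
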
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index 9ce083960a2575df0bd2a4e31ac3c8b881012880..f18490985fc8e5f39d10ed442d302293ac0e7699 100644 (file)
@@ -107,5 +107,13 @@ struct sched_watchdog {
 #define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
 #define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
 #define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */
+/*
+ * Domain asked to perform 'soft reset' for it. The expected behavior is to
+ * reset internal Xen state for the domain returning it to the point where it
+ * was created but leaving the domain's memory contents and vCPU contexts
+ * intact. This will allow the domain to start over and set up all Xen specific
+ * interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
 
 #endif /* __XEN_PUBLIC_SCHED_H__ */
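
A guest requests the soft reset through the existing SCHEDOP_shutdown hypercall. A hypothetical in-kernel sketch, assuming the usual HYPERVISOR_sched_op() wrapper and struct sched_shutdown from this header:

#include <xen/interface/sched.h>
#include <asm/xen/hypercall.h>

static void example_request_soft_reset(void)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_soft_reset };

	/* on success Xen resets the domain's state while preserving its
	 * memory and vCPU contexts, so this returns only on failure */
	HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
}
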
diff --git a/lib/string.c b/lib/string.c
index 8dbb7b1eab508712da538d0ef7cadc0e24b6c723..84775ba873b9efd978fa006be56e58057b34031f 100644 (file)
@@ -203,12 +203,13 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
                unsigned long c, data;
 
                c = *(unsigned long *)(src+res);
-               *(unsigned long *)(dest+res) = c;
                if (has_zero(c, &data, &constants)) {
                        data = prep_zero_mask(c, data, &constants);
                        data = create_zero_mask(data);
+                       *(unsigned long *)(dest+res) = c & zero_bytemask(data);
                        return res + find_zero(data);
                }
+               *(unsigned long *)(dest+res) = c;
                res += sizeof(unsigned long);
                count -= sizeof(unsigned long);
                max -= sizeof(unsigned long);
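
The fix stores the final word through zero_bytemask(), so source bytes beyond the terminating NUL never reach dest; previously the unmasked word was written before the zero check, leaking up to sizeof(long)-1 bytes past the terminator. An illustrative user-space sketch of the masking idea on a little-endian 64-bit machine (these expressions only approximate the kernel's word-at-a-time helpers):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t c, bits, mask, word;
	char dest[9] = { 0 };

	/* one aligned source word: "abc", its NUL, then trailing bytes
	 * that must not be copied */
	memcpy(&c, "abc\0SECR", 8);

	/* flag the first zero byte, then build a byte mask covering only
	 * the bytes before it (the has_zero()/create_zero_mask() idea) */
	bits = (c - 0x0101010101010101ULL) & ~c & 0x8080808080808080ULL;
	mask = ((bits - 1) & ~bits) >> 7;

	word = c & mask;		/* "abc" survives, "SECR" does not */
	memcpy(dest, &word, 8);
	puts(dest);			/* prints: abc */
	return 0;
}
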
diff --git a/mm/filemap.c b/mm/filemap.c
index 72940fb38666811b80c146bc085a1c84fc0e7ecc..1cc5467cf36ce7852f7a0474d5fd3237b3dfff10 100644 (file)
@@ -2473,6 +2473,21 @@ ssize_t generic_perform_write(struct file *file,
                                                iov_iter_count(i));
 
 again:
+               /*
+                * Bring in the user page that we will copy from _first_.
+                * Otherwise there's a nasty deadlock on copying from the
+                * same page as we're writing to, without it being marked
+                * up-to-date.
+                *
+                * Not only is this an optimisation, but it is also required
+                * to check that the address is actually valid, when atomic
+                * usercopies are used, below.
+                */
+               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+                       status = -EFAULT;
+                       break;
+               }
+
                status = a_ops->write_begin(file, mapping, pos, bytes, flags,
                                                &page, &fsdata);
                if (unlikely(status < 0))
@@ -2480,17 +2495,8 @@ again:
 
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);
-               /*
-                * 'page' is now locked.  If we are trying to copy from a
-                * mapping of 'page' in userspace, the copy might fault and
-                * would need PageUptodate() to complete.  But, page can not be
-                * made Uptodate without acquiring the page lock, which we hold.
-                * Deadlock.  Avoid with pagefault_disable().  Fix up below with
-                * iov_iter_fault_in_readable().
-                */
-               pagefault_disable();
+
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-               pagefault_enable();
                flush_dcache_page(page);
 
                status = a_ops->write_end(file, mapping, pos, bytes, copied,
@@ -2513,14 +2519,6 @@ again:
                         */
                        bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
                                                iov_iter_single_seg_count(i));
-                       /*
-                        * This is the fallback to recover if the copy from
-                        * userspace above faults.
-                        */
-                       if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
-                               status = -EFAULT;
-                               break;
-                       }
                        goto again;
                }
                pos += copied;
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 64443eb754ad0fe7fd0b16633c3aa10cebdc3e26..41e452bc580c0fea0f39fe71924b72dcdff6782f 100644 (file)
@@ -270,8 +270,8 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
 
        xprt_clear_connected(xprt);
 
-       rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
+       rpcrdma_buffer_destroy(&r_xprt->rx_buf);
        rpcrdma_ia_close(&r_xprt->rx_ia);
 
        xprt_rdma_free_addresses(xprt);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index eb081ad05e33bb65a89b4afb499177dff4d2de89..8a477e27bad75f4b9a6c8feeceb745ffe9407c71 100644 (file)
@@ -755,19 +755,22 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 
        cancel_delayed_work_sync(&ep->rep_connect_worker);
 
-       if (ia->ri_id->qp) {
+       if (ia->ri_id->qp)
                rpcrdma_ep_disconnect(ep, ia);
+
+       rpcrdma_clean_cq(ep->rep_attr.recv_cq);
+       rpcrdma_clean_cq(ep->rep_attr.send_cq);
+
+       if (ia->ri_id->qp) {
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }
 
-       rpcrdma_clean_cq(ep->rep_attr.recv_cq);
        rc = ib_destroy_cq(ep->rep_attr.recv_cq);
        if (rc)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
                        __func__, rc);
 
-       rpcrdma_clean_cq(ep->rep_attr.send_cq);
        rc = ib_destroy_cq(ep->rep_attr.send_cq);
        if (rc)
                dprintk("RPC:       %s: ib_destroy_cq returned %i\n",