Merge branch 'v4l_for_linus' of git://linuxtv.org/mchehab/for_linus
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 11 Sep 2011 21:58:47 +0000 (14:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 11 Sep 2011 21:58:47 +0000 (14:58 -0700)
* 'v4l_for_linus' of git://linuxtv.org/mchehab/for_linus:
  [media] vp7045: fix buffer setup
  [media] nuvoton-cir: simplify raw IR sample handling
  [media] [Resend] viacam: Don't explode if pci_find_bus() returns NULL
  [media] v4l2: Fix documentation of the codec device controls
  [media] gspca - sonixj: Fix the darkness of sensor om6802 in 320x240
  [media] gspca - sonixj: Fix wrong register mask for sensor om6802
  [media] gspca - ov519: Fix LED inversion of some ov519 webcams
  [media] pwc: precedence bug in pwc_init_controls()

68 files changed:
Documentation/hwmon/max16065
arch/arm/include/asm/hardware/cache-l2x0.h
arch/arm/mach-integrator/integrator_ap.c
arch/arm/mm/abort-macro.S
arch/arm/mm/cache-l2x0.c
arch/arm/mm/init.c
arch/openrisc/include/asm/dma-mapping.h
arch/openrisc/include/asm/sigcontext.h
arch/openrisc/kernel/dma.c
arch/openrisc/kernel/signal.c
arch/x86/include/asm/pvclock.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/pci/acpi.c
arch/x86/xen/setup.c
arch/x86/xen/smp.c
arch/x86/xen/xen-asm_32.S
drivers/base/regmap/regmap.c
drivers/dma/ste_dma40.c
drivers/hid/hid-ids.h
drivers/hid/hid-magicmouse.c
drivers/hid/hid-wacom.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/max16065.c
drivers/hwmon/pmbus/ucd9000.c
drivers/hwmon/pmbus/ucd9200.c
drivers/i2c/busses/i2c-pxa-pci.c
drivers/i2c/busses/i2c-tegra.c
drivers/iommu/amd_iommu.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/host.h
drivers/mmc/core/sd.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sh_mobile_sdhi.c
drivers/mtd/ubi/debug.h
drivers/net/arm/am79c961a.c
drivers/pci/hotplug/pcihp_slot.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/rtc/rtc-ep93xx.c
drivers/rtc/rtc-lib.c
drivers/rtc/rtc-twl.c
drivers/scsi/qla4xxx/Kconfig
drivers/video/backlight/backlight.c
fs/9p/v9fs_vfs.h
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/block_dev.c
fs/ceph/mds_client.c
fs/ceph/super.c
fs/namei.c
fs/ubifs/debug.h
include/linux/perf_event.h
include/linux/regulator/consumer.h
include/net/9p/9p.h
include/net/cfg80211.h
kernel/events/core.c
kernel/sched.c
kernel/time/alarmtimer.c
net/9p/trans_virtio.c
net/ceph/msgpool.c
net/ceph/osd_client.c

diff --git a/Documentation/hwmon/max16065 b/Documentation/hwmon/max16065
index 44b4f61e04f9e3195c32602f77b2fd310dbbf980..c11f64a1f2adb61077f5910215981d589503aa0f 100644
@@ -62,6 +62,13 @@ can be safely used to identify the chip. You will have to instantiate
 the devices explicitly. Please see Documentation/i2c/instantiating-devices for
 details.
 
+WARNING: Do not access chip registers using the i2cdump command, and do not use
+any of the i2ctools commands on a command register (0xa5 to 0xac). The chips
+supported by this driver interpret any access to a command register (including
+read commands) as a request to execute the command in question. This may result in
+power loss, board resets, and/or Flash corruption. Worst case, your board may
+turn into a brick.
+
 
 Sysfs entries
 -------------
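
The warning above amounts to one rule: never touch registers 0xa5 through 0xac with raw reads or writes, because the chip treats any access as a command trigger. A minimal sketch of how a driver-side guard for that range could look (the helper name is hypothetical and not part of the max16065 driver itself):

	/* Hypothetical guard: the chip executes a command on *any* access to a
	 * command register, so raw access to 0xa5..0xac must be refused. */
	static bool reg_is_command_register(unsigned int reg)
	{
		return reg >= 0xa5 && reg <= 0xac;
	}
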
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index bfa706ffd9683966dec63406c35e2e71942bd294..99a6ed7e1bfd5ce6395ca7a900079ada255da281 100644
 #define L2X0_CLEAN_INV_LINE_PA         0x7F0
 #define L2X0_CLEAN_INV_LINE_IDX                0x7F8
 #define L2X0_CLEAN_INV_WAY             0x7FC
-#define L2X0_LOCKDOWN_WAY_D            0x900
-#define L2X0_LOCKDOWN_WAY_I            0x904
+/*
+ * The lockdown registers repeat 8 times for the L310; the L210 has only one
+ * D and one I lockdown register at 0x0900 and 0x0904.
+ */
+#define L2X0_LOCKDOWN_WAY_D_BASE       0x900
+#define L2X0_LOCKDOWN_WAY_I_BASE       0x904
+#define L2X0_LOCKDOWN_STRIDE           0x08
 #define L2X0_TEST_OPERATION            0xF00
 #define L2X0_LINE_DATA                 0xF10
 #define L2X0_LINE_TAG                  0xF30
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 2fbbdd5eac352bb4df44611894e4220e029f7903..fcf0ae95651fd3c0fa3ce9f17f3c4b7d3cc55d15 100644
@@ -337,15 +337,15 @@ static unsigned long timer_reload;
 static void integrator_clocksource_init(u32 khz)
 {
        void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
-       u32 ctrl = TIMER_CTRL_ENABLE;
+       u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
 
        if (khz >= 1500) {
                khz /= 16;
-               ctrl = TIMER_CTRL_DIV16;
+               ctrl |= TIMER_CTRL_DIV16;
        }
 
-       writel(ctrl, base + TIMER_CTRL);
        writel(0xffff, base + TIMER_LOAD);
+       writel(ctrl, base + TIMER_CTRL);
 
        clocksource_mmio_init(base + TIMER_VALUE, "timer2",
                khz * 1000, 200, 16, clocksource_mmio_readl_down);
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 52162d59407a4679eee5fce0f4d848a62b2ea182..2cbf68ef0e8321121e5ecabb55f50f95083beb1d 100644
@@ -17,7 +17,7 @@
        cmp     \tmp, # 0x5600                  @ Is it ldrsb?
        orreq   \tmp, \tmp, #1 << 11            @ Set L-bit if yes
        tst     \tmp, #1 << 11                  @ L = 0 -> write
-       orreq   \psr, \psr, #1 << 11            @ yes.
+       orreq   \fsr, \fsr, #1 << 11            @ yes.
        b       do_DataAbort
 not_thumb:
        .endm
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 44c086710d2ba5d21a40741290ef8c40de35928c..9ecfdb5119513b632154f9812b4cd490de24efc5 100644
@@ -277,6 +277,25 @@ static void l2x0_disable(void)
        spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void __init l2x0_unlock(__u32 cache_id)
+{
+       int lockregs;
+       int i;
+
+       if (cache_id == L2X0_CACHE_ID_PART_L310)
+               lockregs = 8;
+       else
+               /* L210 and unknown types */
+               lockregs = 1;
+
+       for (i = 0; i < lockregs; i++) {
+               writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
+                              i * L2X0_LOCKDOWN_STRIDE);
+               writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
+                              i * L2X0_LOCKDOWN_STRIDE);
+       }
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
        __u32 aux;
@@ -328,6 +347,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
         * accessing the below registers will fault.
         */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+               /* Make sure that I&D is not locked down when starting */
+               l2x0_unlock(cache_id);
 
                /* l2x0 controller is disabled */
                writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 91bca355cd3105462bdb4c6756d094910ecd0c81..cc7e2d8be9aa6f55cd1f670c998e1c065acec75e 100644
@@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
 int pfn_valid(unsigned long pfn)
 {
-       return memblock_is_memory(pfn << PAGE_SHIFT);
+       return memblock_is_memory(__pfn_to_phys(pfn));
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
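
The fix above matters when physical addresses are wider than unsigned long (e.g. ARM LPAE): shifting the pfn in unsigned long arithmetic truncates before the value is widened, while __pfn_to_phys() casts to phys_addr_t first. A small illustration, assuming a 32-bit unsigned long, a 64-bit phys_addr_t and PAGE_SHIFT of 12:

	unsigned long pfn = 0x100000;                        /* page at the 4 GiB mark */
	phys_addr_t wrong = pfn << PAGE_SHIFT;               /* 32-bit shift wraps to 0 */
	phys_addr_t right = (phys_addr_t)pfn << PAGE_SHIFT;  /* 0x100000000 as intended */
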
diff --git a/arch/openrisc/include/asm/dma-mapping.h b/arch/openrisc/include/asm/dma-mapping.h
index 052f877b52a53d63e58fae5f1fe713164e5f9f59..60b472233900c4882b8f66f1339118183e298d42 100644
@@ -31,7 +31,6 @@
 
 #define DMA_ERROR_CODE         (~(dma_addr_t)0x0)
 
-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
@@ -47,6 +46,12 @@ dma_addr_t or1k_map_page(struct device *dev, struct page *page,
 void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
                     size_t size, enum dma_data_direction dir,
                     struct dma_attrs *attrs);
+int or1k_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir,
+               struct dma_attrs *attrs);
+void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
+                  int nents, enum dma_data_direction dir,
+                  struct dma_attrs *attrs);
 void or1k_sync_single_for_cpu(struct device *dev,
                              dma_addr_t dma_handle, size_t size,
                              enum dma_data_direction dir);
@@ -98,6 +103,51 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
        debug_dma_unmap_page(dev, addr, size, dir, true);
 }
 
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
+                                  int nents, enum dma_data_direction dir)
+{
+       int i, ents;
+       struct scatterlist *s;
+
+       for_each_sg(sg, s, nents, i)
+               kmemcheck_mark_initialized(sg_virt(s), s->length);
+       BUG_ON(!valid_dma_direction(dir));
+       ents = or1k_map_sg(dev, sg, nents, dir, NULL);
+       debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+       return ents;
+}
+
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+                                     int nents, enum dma_data_direction dir)
+{
+       BUG_ON(!valid_dma_direction(dir));
+       debug_dma_unmap_sg(dev, sg, nents, dir);
+       or1k_unmap_sg(dev, sg, nents, dir, NULL);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+                                     size_t offset, size_t size,
+                                     enum dma_data_direction dir)
+{
+       dma_addr_t addr;
+
+       kmemcheck_mark_initialized(page_address(page) + offset, size);
+       BUG_ON(!valid_dma_direction(dir));
+       addr = or1k_map_page(dev, page, offset, size, dir, NULL);
+       debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+       return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+                                 size_t size, enum dma_data_direction dir)
+{
+       BUG_ON(!valid_dma_direction(dir));
+       or1k_unmap_page(dev, addr, size, dir, NULL);
+       debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
@@ -119,7 +169,12 @@ static inline void dma_sync_single_for_device(struct device *dev,
 static inline int dma_supported(struct device *dev, u64 dma_mask)
 {
        /* Support 32 bit DMA mask exclusively */
-       return dma_mask == 0xffffffffULL;
+       return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return 0;
 }
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
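
For context, the scatterlist entry points added above follow the generic DMA API contract. A hedged usage sketch from a driver's point of view (variable names are illustrative only):

	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -EIO;
	/* program the device with sg_dma_address()/sg_dma_len() of each entry */
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
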
diff --git a/arch/openrisc/include/asm/sigcontext.h b/arch/openrisc/include/asm/sigcontext.h
index 54a5c50132e35e4000e45f874c6c1618894b9f9f..b79c2b19afbe6cfe9f4327535e257e12c9aaa29f 100644
 
 /* This struct is saved by setup_frame in signal.c, to keep the current
    context while a signal handler is executed. It's restored by sys_sigreturn.
-
-   To keep things simple, we use pt_regs here even though normally you just
-   specify the list of regs to save. Then we can use copy_from_user on the
-   entire regs instead of a bunch of get_user's as well...
 */
 
 struct sigcontext {
-       struct pt_regs regs;  /* needs to be first */
+       struct user_regs_struct regs;  /* needs to be first */
        unsigned long oldmask;
-       unsigned long usp;    /* usp before stacking this gunk on it */
 };
 
 #endif /* __ASM_OPENRISC_SIGCONTEXT_H */
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index 968d3ee477e331bc7be928a96b5768311e16d676..f1c8ee2895d0f26f51a634b913ee5f61d5a059d7 100644
@@ -154,6 +154,33 @@ void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
        /* Nothing special to do here... */
 }
 
+int or1k_map_sg(struct device *dev, struct scatterlist *sg,
+               int nents, enum dma_data_direction dir,
+               struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
+                                              s->length, dir, NULL);
+       }
+
+       return nents;
+}
+
+void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
+                  int nents, enum dma_data_direction dir,
+                  struct dma_attrs *attrs)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
+       }
+}
+
 void or1k_sync_single_for_cpu(struct device *dev,
                              dma_addr_t dma_handle, size_t size,
                              enum dma_data_direction dir)
@@ -187,5 +214,4 @@ static int __init dma_init(void)
 
        return 0;
 }
-
 fs_initcall(dma_init);
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index 5f759c76834eee10dd821edc8a0b560d6d3931df..95207ab0c99ed536016e88b8be1a2b077ec3bae5 100644
@@ -52,31 +52,25 @@ struct rt_sigframe {
 static int restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
 {
        unsigned int err = 0;
-       unsigned long old_usp;
 
        /* Always make any pending restarted system call return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-       /* restore the regs from &sc->regs (same as sc, since regs is first)
+       /*
+        * Restore the regs from &sc->regs.
         * (sc is already checked for VERIFY_READ since the sigframe was
         *  checked in sys_sigreturn previously)
         */
-
-       if (__copy_from_user(regs, sc, sizeof(struct pt_regs)))
+       if (__copy_from_user(regs, sc->regs.gpr, 32 * sizeof(unsigned long)))
+               goto badframe;
+       if (__copy_from_user(&regs->pc, &sc->regs.pc, sizeof(unsigned long)))
+               goto badframe;
+       if (__copy_from_user(&regs->sr, &sc->regs.sr, sizeof(unsigned long)))
                goto badframe;
 
        /* make sure the SM-bit is cleared so user-mode cannot fool us */
        regs->sr &= ~SPR_SR_SM;
 
-       /* restore the old USP as it was before we stacked the sc etc.
-        * (we cannot just pop the sigcontext since we aligned the sp and
-        *  stuff after pushing it)
-        */
-
-       err |= __get_user(old_usp, &sc->usp);
-
-       regs->sp = old_usp;
-
        /* TODO: the other ports use regs->orig_XX to disable syscall checks
         * after this completes, but we don't use that mechanism. maybe we can
         * use it now ?
@@ -137,18 +131,17 @@ static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
                            unsigned long mask)
 {
        int err = 0;
-       unsigned long usp = regs->sp;
 
-       /* copy the regs. they are first in sc so we can use sc directly */
+       /* copy the regs */
 
-       err |= __copy_to_user(sc, regs, sizeof(struct pt_regs));
+       err |= __copy_to_user(sc->regs.gpr, regs, 32 * sizeof(unsigned long));
+       err |= __copy_to_user(&sc->regs.pc, &regs->pc, sizeof(unsigned long));
+       err |= __copy_to_user(&sc->regs.sr, &regs->sr, sizeof(unsigned long));
 
        /* then some other stuff */
 
        err |= __put_user(mask, &sc->oldmask);
 
-       err |= __put_user(usp, &sc->usp);
-
        return err;
 }
 
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index a518c0a4504465e6ac46068ba6d34a68d66a6fd3..c59cc97fe6c1478225863d0d814bece995652f5a 100644
@@ -44,7 +44,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
                : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
 #elif defined(__x86_64__)
        __asm__ (
-               "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
+               "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
                : [lo]"=a"(product),
                  [hi]"=d"(tmp)
                : "0"(delta),
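
The inline assembly implements a 32.32 fixed-point scaling of the delta. A portable sketch of the same arithmetic (assuming a compiler that provides __uint128_t; this is for illustration, not the code the kernel uses):

	static inline u64 scale_delta_sketch(u64 delta, u32 mul_frac, int shift)
	{
		if (shift < 0)
			delta >>= -shift;
		else
			delta <<= shift;
		/* 64 x 32 -> 96-bit multiply, keep bits [95:32] */
		return (u64)(((__uint128_t)delta * mul_frac) >> 32);
	}

The explicit "q" suffix pins the multiply to a 64-bit operand size instead of leaving the assembler to infer it from the operand.
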
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4ee3abf20ed6118a45e0e6068962f4c181eeecaf..cfa62ec090ece1dfb48420c5641cdb52b58feffa 100644
@@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
        perf_callchain_store(entry, regs->ip);
 
+       if (!current->mm)
+               return;
+
        if (perf_callchain_user32(regs, entry))
                return;
 
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index c95330267f08cfee41a27a0c94675d5779776f86..039d91315bc56bfcf6b8693c5ea3890ba7de4363 100644
@@ -365,8 +365,13 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
         */
        if (bus) {
                struct pci_bus *child;
-               list_for_each_entry(child, &bus->children, node)
-                       pcie_bus_configure_settings(child, child->self->pcie_mpss);
+               list_for_each_entry(child, &bus->children, node) {
+                       struct pci_dev *self = child->self;
+                       if (!self)
+                               continue;
+
+                       pcie_bus_configure_settings(child, self->pcie_mpss);
+               }
        }
 
        if (!bus)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index df118a825f395cbb4e79b9282594caa77aa6a4ca..c3b8d440873caa2986b623d329fec8d88c111435 100644
@@ -184,6 +184,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
                                        PFN_UP(start_pci), PFN_DOWN(last));
        return identity;
 }
+
+static unsigned long __init xen_get_max_pages(void)
+{
+       unsigned long max_pages = MAX_DOMAIN_PAGES;
+       domid_t domid = DOMID_SELF;
+       int ret;
+
+       ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+       if (ret > 0)
+               max_pages = ret;
+       return min(max_pages, MAX_DOMAIN_PAGES);
+}
+
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
@@ -292,6 +305,12 @@ char * __init xen_memory_setup(void)
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
+       extra_limit = xen_get_max_pages();
+       if (extra_limit >= max_pfn)
+               extra_pages = extra_limit - max_pfn;
+       else
+               extra_pages = 0;
+
        extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);
 
        /*
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index e79dbb95482b84f07f01506ca3692ae8ee63ca22..d4fc6d454f8d0dced29f592dd0b33407c5088b92 100644
@@ -32,6 +32,7 @@
 #include <xen/page.h>
 #include <xen/events.h>
 
+#include <xen/hvc-console.h>
 #include "xen-ops.h"
 #include "mmu.h"
 
@@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
        unsigned cpu;
        unsigned int i;
 
+       if (skip_ioapic_setup) {
+               char *m = (max_cpus == 0) ?
+                       "The nosmp parameter is incompatible with Xen; " \
+                       "use Xen dom0_max_vcpus=1 parameter" :
+                       "The noapic parameter is incompatible with Xen";
+
+               xen_raw_printk(m);
+               panic(m);
+       }
        xen_init_lock_cpu(0);
 
        smp_store_cpu_info(0);
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 22a2093b58623cca3472a03c3950cad4395a884d..b040b0e518caf0a0fe382999b36c2c04e7344860 100644
@@ -113,11 +113,13 @@ xen_iret_start_crit:
 
        /*
         * If there's something pending, mask events again so we can
-        * jump back into xen_hypervisor_callback
+        * jump back into xen_hypervisor_callback. Otherwise do not
+        * touch XEN_vcpu_info_mask.
         */
-       sete XEN_vcpu_info_mask(%eax)
+       jne 1f
+       movb $1, XEN_vcpu_info_mask(%eax)
 
-       popl %eax
+1:     popl %eax
 
        /*
         * From this point on the registers are restored and the stack
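
In C terms the corrected sequence does roughly the following (field names abbreviated for illustration): the mask byte is written only when an event is actually pending, whereas the old sete wrote it on every pass and so cleared the mask in the no-pending case.

	if (vcpu_info->evtchn_upcall_pending)
		vcpu_info->evtchn_upcall_mask = 1;
	/* otherwise the mask byte is left untouched */
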
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0eef4da1ac61f1deb274e56fb5601cfc6275c193..20663f8dae45a3674e6ce04c84a54c5e07361151 100644
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
        map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
        if (map->work_buf == NULL) {
                ret = -ENOMEM;
-               goto err_bus;
+               goto err_map;
        }
 
        return map;
 
-err_bus:
-       module_put(map->bus->owner);
 err_map:
        kfree(map);
 err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
 void regmap_exit(struct regmap *map)
 {
        kfree(map->work_buf);
-       module_put(map->bus->owner);
        kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index cd3a7c726bf87bef330f882981afc2fc42be964e..467e4dcb20a012cadfcc10bc616b25dbe11d373a 100644
@@ -174,8 +174,10 @@ struct d40_base;
  * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
  * transfer and call client callback.
  * @client: Client owned descriptor list.
+ * @pending_queue: Submitted jobs, to be issued by issue_pending()
  * @active: Active descriptor.
  * @queue: Queued jobs.
+ * @prepare_queue: Prepared jobs.
  * @dma_cfg: The client configuration of this dma channel.
  * @configured: whether the dma_cfg configuration is valid
  * @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
        struct list_head                 pending_queue;
        struct list_head                 active;
        struct list_head                 queue;
+       struct list_head                 prepare_queue;
        struct stedma40_chan_cfg         dma_cfg;
        bool                             configured;
        struct d40_base                 *base;
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 
                list_for_each_entry_safe(d, _d, &d40c->client, node)
                        if (async_tx_test_ack(&d->txd)) {
-                               d40_pool_lli_free(d40c, d);
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
        return d;
 }
 
+/* remove desc from current queue and add it to the pending_queue */
 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
 {
+       d40_desc_remove(desc);
+       desc->is_in_client_list = false;
        list_add_tail(&desc->node, &d40c->pending_queue);
 }
 
@@ -803,6 +808,7 @@ done:
 static void d40_term_all(struct d40_chan *d40c)
 {
        struct d40_desc *d40d;
+       struct d40_desc *_d;
 
        /* Release active descriptors */
        while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
                d40_desc_free(d40c, d40d);
        }
 
+       /* Release client owned descriptors */
+       if (!list_empty(&d40c->client))
+               list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
+                       d40_desc_remove(d40d);
+                       d40_desc_free(d40c, d40d);
+               }
+
+       /* Release descriptors in prepare queue */
+       if (!list_empty(&d40c->prepare_queue))
+               list_for_each_entry_safe(d40d, _d,
+                                        &d40c->prepare_queue, node) {
+                       d40_desc_remove(d40d);
+                       d40_desc_free(d40c, d40d);
+               }
+
        d40c->pending_tx = 0;
        d40c->busy = false;
 }
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)
 
        if (!d40d->cyclic) {
                if (async_tx_test_ack(&d40d->txd)) {
-                       d40_pool_lli_free(d40c, d40d);
                        d40_desc_remove(d40d);
                        d40_desc_free(d40c, d40d);
                } else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
        u32 event;
        struct d40_phy_res *phy = d40c->phy_chan;
        bool is_src;
-       struct d40_desc *d;
-       struct d40_desc *_d;
-
 
        /* Terminate all queued and active transfers */
        d40_term_all(d40c);
 
-       /* Release client owned descriptors */
-       if (!list_empty(&d40c->client))
-               list_for_each_entry_safe(d, _d, &d40c->client, node) {
-                       d40_pool_lli_free(d40c, d);
-                       d40_desc_remove(d);
-                       d40_desc_free(d40c, d);
-               }
-
        if (phy == NULL) {
                chan_err(d40c, "phy == null\n");
                return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
                goto err;
        }
 
+       /*
+        * add descriptor to the prepare queue in order to be able
+        * to free them later in terminate_all
+        */
+       list_add_tail(&desc->node, &chan->prepare_queue);
+
        spin_unlock_irqrestore(&chan->lock, flags);
 
        return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
                INIT_LIST_HEAD(&d40c->queue);
                INIT_LIST_HEAD(&d40c->pending_queue);
                INIT_LIST_HEAD(&d40c->client);
+               INIT_LIST_HEAD(&d40c->prepare_queue);
 
                tasklet_init(&d40c->tasklet, dma_tasklet,
                             (unsigned long) d40c);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7d27d2b0445ac07f546e818dced2ce7125e189be..7484e1b67249578a9e58ec9e3dcdf32bc6d84315 100644
 #define USB_DEVICE_ID_PENPOWER         0x00f4
 
 #define USB_VENDOR_ID_GREENASIA                0x0e8f
+#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD        0x3013
 
 #define USB_VENDOR_ID_GRETAGMACBETH    0x0971
 #define USB_DEVICE_ID_GRETAGMACBETH_HUEY       0x2005
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 0ec91c18a4216a52a4a3c7d85292a22f61a6e828..f0fbd7bd239e389e11e21d55ef108b5e8066984d 100644
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
 #define NO_TOUCHES -1
 #define SINGLE_TOUCH_UP -2
 
+/* Touch surface information. Dimension is in hundredths of a mm, min and max
+ * are in units. */
+#define MOUSE_DIMENSION_X (float)9056
+#define MOUSE_MIN_X -1100
+#define MOUSE_MAX_X 1258
+#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
+#define MOUSE_DIMENSION_Y (float)5152
+#define MOUSE_MIN_Y -1589
+#define MOUSE_MAX_Y 2047
+#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))
+
+#define TRACKPAD_DIMENSION_X (float)13000
+#define TRACKPAD_MIN_X -2909
+#define TRACKPAD_MAX_X 3167
+#define TRACKPAD_RES_X \
+       ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
+#define TRACKPAD_DIMENSION_Y (float)11000
+#define TRACKPAD_MIN_Y -2456
+#define TRACKPAD_MAX_Y 2565
+#define TRACKPAD_RES_Y \
+       ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
+
 /**
  * struct magicmouse_sc - Tracks Magic Mouse-specific data.
  * @input: Input device through which we report events.
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
                 * inverse of the reported Y.
                 */
                if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
-                       input_set_abs_params(input, ABS_MT_POSITION_X, -1100,
-                               1358, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_Y, -1589,
-                               2047, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_X,
+                               MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_Y,
+                               MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
+
+                       input_abs_set_res(input, ABS_MT_POSITION_X,
+                               MOUSE_RES_X);
+                       input_abs_set_res(input, ABS_MT_POSITION_Y,
+                               MOUSE_RES_Y);
                } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
-                       input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0);
-                       input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_X, -2909,
-                               3167, 4, 0);
-                       input_set_abs_params(input, ABS_MT_POSITION_Y, -2456,
-                               2565, 4, 0);
+                       input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
+                               TRACKPAD_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
+                               TRACKPAD_MAX_Y, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_X,
+                               TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
+                       input_set_abs_params(input, ABS_MT_POSITION_Y,
+                               TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
+
+                       input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
+                       input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
+                       input_abs_set_res(input, ABS_MT_POSITION_X,
+                               TRACKPAD_RES_X);
+                       input_abs_set_res(input, ABS_MT_POSITION_Y,
+                               TRACKPAD_RES_Y);
                }
 
                input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
        }
        report->size = 6;
 
+       /*
+        * Some devices respond with 'invalid report id' when feature
+        * report switching it into multitouch mode is sent to it.
+        *
+        * This results in -EIO from the _raw low-level transport callback,
+        * but there seems to be no other way of switching the mode.
+        * Thus the super-ugly hacky success check below.
+        */
        ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
                        HID_FEATURE_REPORT);
-       if (ret != sizeof(feature)) {
+       if (ret != -EIO && ret != sizeof(feature)) {
                hid_err(hdev, "unable to request touch data (%d)\n", ret);
                goto err_stop_hw;
        }
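
A worked example of the new resolution macros, which express touch-surface resolution in units per millimetre (the dimension constants are in hundredths of a mm):

	MOUSE_RES_X = (MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100)
	            = (1258 - (-1100)) / (9056 / 100)
	            = 2358 / 90.56  ~ 26 units per mm
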
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 06888323828c3b18a8e162402193eeb6aec72b84..a597039d0755bd88d506f620287dcaa00b921519 100644
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
        if (ret) {
                hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
                         ret);
-               /*
-                * battery attribute is not critical for the tablet, but if it
-                * failed then there is no need to create ac attribute
-                */
-               goto move_on;
+               goto err_battery;
        }
 
        wdata->ac.properties = wacom_ac_props;
@@ -371,14 +367,8 @@ static int wacom_probe(struct hid_device *hdev,
        if (ret) {
                hid_warn(hdev,
                         "can't create ac battery attribute, err: %d\n", ret);
-               /*
-                * ac attribute is not critical for the tablet, but if it
-                * failed then we don't want to battery attribute to exist
-                */
-               power_supply_unregister(&wdata->battery);
+               goto err_ac;
        }
-
-move_on:
 #endif
        hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
        input = hidinput->input;
@@ -416,6 +406,13 @@ move_on:
 
        return 0;
 
+#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
+err_ac:
+       power_supply_unregister(&wdata->battery);
+err_battery:
+       device_remove_file(&hdev->dev, &dev_attr_speed);
+       hid_hw_stop(hdev);
+#endif
 err_free:
        kfree(wdata);
        return ret;
@@ -426,6 +423,7 @@ static void wacom_remove(struct hid_device *hdev)
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
        struct wacom_data *wdata = hid_get_drvdata(hdev);
 #endif
+       device_remove_file(&hdev->dev, &dev_attr_speed);
        hid_hw_stop(hdev);
 
 #ifdef CONFIG_HID_WACOM_POWER_SUPPLY
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 4bdb5d46c52c2a21d4ead0e592b90391d6cf5ec8..3146fdcda272cd33f199f13b686d28c092a4921c 100644
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
 
        { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
        { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index d94a24fdf4ba2a07ad006ebeae536745d5e1a25d..dd2d7b9620c2bde5bb8cac1a2c8ac48879a1b290 100644
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)
 
 static inline int ADC_TO_CURR(int adc, int gain)
 {
-       return adc * 1400000 / gain * 255;
+       return adc * 1400000 / (gain * 255);
 }
 
 /*
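
The precedence fix changes the result by a factor of 255 * 255, since the old expression divided by gain and then multiplied by 255 instead of dividing by their product. A worked example with adc = 128 and gain = 4 (values chosen only for illustration):

	old: 128 * 1400000 / 4 * 255   = 44800000 * 255   (overflows a 32-bit int)
	new: 128 * 1400000 / (4 * 255) = 179200000 / 1020 = ~175686
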
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index ace1c731973435581b4d41efedf72bf19f8a888f..d0ddb60155c972c8136d3e2ddd9d0f857fc29cd9 100644
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
        block_buffer[ret] = '\0';
        dev_info(&client->dev, "Device ID %s\n", block_buffer);
 
-       mid = NULL;
-       for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
-               mid = &ucd9000_id[i];
+       for (mid = ucd9000_id; mid->name[0]; mid++) {
                if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
                        break;
        }
-       if (!mid || !strlen(mid->name)) {
+       if (!mid->name[0]) {
                dev_err(&client->dev, "Unsupported device\n");
                return -ENODEV;
        }
diff --git a/drivers/hwmon/pmbus/ucd9200.c b/drivers/hwmon/pmbus/ucd9200.c
index ffcc1cf3609d60d8cf56b4e767eed6292bb653c8..c65e9da707cc160a25e74681bc681715e189f3d1 100644
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
        block_buffer[ret] = '\0';
        dev_info(&client->dev, "Device ID %s\n", block_buffer);
 
-       mid = NULL;
-       for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
-               mid = &ucd9200_id[i];
+       for (mid = ucd9200_id; mid->name[0]; mid++) {
                if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
                        break;
        }
-       if (!mid || !strlen(mid->name)) {
+       if (!mid->name[0]) {
                dev_err(&client->dev, "Unsupported device\n");
                return -ENODEV;
        }
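
Both probe loops above now walk the id table up to its terminating entry, which relies on the usual convention that an i2c_device_id array ends with an all-zero sentinel so that mid->name[0] is '\0'. A sketch of the shape such a table must have (the entries shown are illustrative, not the driver's full list):

	static const struct i2c_device_id ucd9200_id[] = {
		{ "ucd9220", ucd9220 },
		{ "ucd9240", ucd9240 },
		{ }	/* sentinel: empty name terminates the walk */
	};
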
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 6659d269b841b4c04869ec3aa9d18eb23cb4a4a4..b73da6cd6f915008384d9fd3f20b95ca4d41db8d 100644
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
                return -EINVAL;
        }
        sds = kzalloc(sizeof(*sds), GFP_KERNEL);
-       if (!sds)
+       if (!sds) {
+               ret = -ENOMEM;
                goto err_mem;
+       }
 
        for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
                sds->pdev[i] = add_i2c_device(dev, i);
                if (IS_ERR(sds->pdev[i])) {
+                       ret = PTR_ERR(sds->pdev[i]);
                        while (--i >= 0)
                                platform_device_unregister(sds->pdev[i]);
                        goto err_dev_add;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 2440b741197851247ea267cd8464467c925a4c72..3c94c4a81a554563daecc1d872f044886b09fb1b 100644
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
 
        /* Rounds down to not include partial word at the end of buf */
        words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
-       if (words_to_transfer > tx_fifo_avail)
-               words_to_transfer = tx_fifo_avail;
 
-       i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
-
-       buf += words_to_transfer * BYTES_PER_FIFO_WORD;
-       buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
-       tx_fifo_avail -= words_to_transfer;
+       /* It's very common to have < 4 bytes, so optimize that case. */
+       if (words_to_transfer) {
+               if (words_to_transfer > tx_fifo_avail)
+                       words_to_transfer = tx_fifo_avail;
+
+               /*
+                * Update state before writing to FIFO.  If this causes us
+                * to finish writing all bytes (AKA buf_remaining goes to 0) we
+                * have a potential for an interrupt (PACKET_XFER_COMPLETE is
+                * not maskable).  We need to make sure that the isr sees
+                * buf_remaining as 0 and doesn't call us back re-entrantly.
+                */
+               buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+               tx_fifo_avail -= words_to_transfer;
+               i2c_dev->msg_buf_remaining = buf_remaining;
+               i2c_dev->msg_buf = buf +
+                       words_to_transfer * BYTES_PER_FIFO_WORD;
+               barrier();
+
+               i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+               buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+       }
 
        /*
         * If there is a partial word at the end of buf, handle it manually to
@@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
        if (tx_fifo_avail > 0 && buf_remaining > 0) {
                BUG_ON(buf_remaining > 3);
                memcpy(&val, buf, buf_remaining);
+
+               /* Again update before writing to FIFO to make sure isr sees. */
+               i2c_dev->msg_buf_remaining = 0;
+               i2c_dev->msg_buf = NULL;
+               barrier();
+
                i2c_writel(i2c_dev, val, I2C_TX_FIFO);
-               buf_remaining = 0;
-               tx_fifo_avail--;
        }
 
-       BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
-       i2c_dev->msg_buf_remaining = buf_remaining;
-       i2c_dev->msg_buf = buf;
        return 0;
 }
 
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
                        tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
        }
 
-       if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
-                       !i2c_dev->msg_buf_remaining)
+       if (status & I2C_INT_PACKET_XFER_COMPLETE) {
+               BUG_ON(i2c_dev->msg_buf_remaining);
                complete(&i2c_dev->msg_complete);
+       }
 
        i2c_writel(i2c_dev, status, I2C_INT_STATUS);
        if (i2c_dev->is_dvc)
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
 
 static u32 tegra_i2c_func(struct i2c_adapter *adap)
 {
-       return I2C_FUNC_I2C;
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
 static const struct i2c_algorithm tegra_i2c_algo = {
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
 }
 #endif
 
+#if defined(CONFIG_OF)
+/* Match table for of_platform binding */
+static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
+       { .compatible = "nvidia,tegra20-i2c", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
+#else
+#define tegra_i2c_of_match NULL
+#endif
+
 static struct platform_driver tegra_i2c_driver = {
        .probe   = tegra_i2c_probe,
        .remove  = tegra_i2c_remove,
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
        .driver  = {
                .name  = "tegra-i2c",
                .owner = THIS_MODULE,
+               .of_match_table = tegra_i2c_of_match,
        },
 };
 
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a14f8dc23462229c8ba34d6aec18a14eb3fcaf7a..0e4227f457af38c9e1c6fc6bf1e065b94e1e9c4f 100644
@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
  * Writes the command to the IOMMUs command buffer and informs the
  * hardware about the new command.
  */
-static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+static int iommu_queue_command_sync(struct amd_iommu *iommu,
+                                   struct iommu_cmd *cmd,
+                                   bool sync)
 {
        u32 left, tail, head, next_tail;
        unsigned long flags;
@@ -639,13 +641,18 @@ again:
        copy_cmd_to_buffer(iommu, cmd, tail);
 
        /* We need to sync now to make sure all commands are processed */
-       iommu->need_sync = true;
+       iommu->need_sync = sync;
 
        spin_unlock_irqrestore(&iommu->lock, flags);
 
        return 0;
 }
 
+static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
+{
+       return iommu_queue_command_sync(iommu, cmd, true);
+}
+
 /*
  * This function queues a completion wait command into the command
  * buffer of an IOMMU
@@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 
        build_completion_wait(&cmd, (u64)&sem);
 
-       ret = iommu_queue_command(iommu, &cmd);
+       ret = iommu_queue_command_sync(iommu, &cmd, false);
        if (ret)
                return ret;
 
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
 static void domain_flush_devices(struct protection_domain *domain)
 {
        struct iommu_dev_data *dev_data;
-       unsigned long flags;
-
-       spin_lock_irqsave(&domain->lock, flags);
 
        list_for_each_entry(dev_data, &domain->dev_list, list)
                device_flush_dte(dev_data);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
 }
 
 /****************************************************************************
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3742ce8b0acf6b70cf119bc412654c94ec0aa165..5404b229582021c9c3811934ea73c6fa4efa1eca 100644
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
                        ret = 0;
        }
        rdev->sectors = rdev->sb_start;
+       /* Limit to 4TB as metadata cannot record more than that */
+       if (rdev->sectors >= (2ULL << 32))
+               rdev->sectors = (2ULL << 32) - 2;
 
-       if (rdev->sectors < sb->size * 2 && sb->level > 1)
+       if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
                /* "this cannot possibly happen" ... */
                ret = -EINVAL;
 
@@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
                mddev->clevel[0] = 0;
                mddev->layout = sb->layout;
                mddev->raid_disks = sb->raid_disks;
-               mddev->dev_sectors = sb->size * 2;
+               mddev->dev_sectors = ((sector_t)sb->size) * 2;
                mddev->events = ev1;
                mddev->bitmap_info.offset = 0;
                mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
        rdev->sb_start = calc_dev_sboffset(rdev);
        if (!num_sectors || num_sectors > rdev->sb_start)
                num_sectors = rdev->sb_start;
+       /* Limit to 4TB as metadata cannot record more than that.
+        * 4TB == 2^32 KB, or 2*2^32 sectors.
+        */
+       if (num_sectors >= (2ULL << 32))
+               num_sectors = (2ULL << 32) - 2;
        md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                       rdev->sb_page);
        md_super_wait(rdev->mddev);
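
For reference, the 4TB cap in the two hunks above follows from the v0.90 superblock recording device sizes in a 32-bit KiB field: 2ULL << 32 is 2^33 sectors, and 2^33 sectors * 512 bytes = 2^42 bytes = 4 TiB; subtracting 2 sectors (1 KiB) keeps the value representable in that field.
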
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 32323f0afd8954714401cb8ed8c5b455b32a9743..f4622dd8fc590b2bbcaee444b24ab2f55336503d 100644
@@ -1099,12 +1099,11 @@ read_again:
                bio_list_add(&conf->pending_bio_list, mbio);
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
-       r1_bio_write_done(r1_bio);
-
-       /* In case raid1d snuck in to freeze_array */
-       wake_up(&conf->wait_barrier);
-
+       /* Mustn't call r1_bio_write_done before this next test,
+        * as it could result in the bio being freed.
+        */
        if (sectors_handled < (bio->bi_size >> 9)) {
+               r1_bio_write_done(r1_bio);
                /* We need another r1_bio.  It has already been counted
                 * in bio->bi_phys_segments
                 */
@@ -1117,6 +1116,11 @@ read_again:
                goto retry_write;
        }
 
+       r1_bio_write_done(r1_bio);
+
+       /* In case raid1d snuck in to freeze_array */
+       wake_up(&conf->wait_barrier);
+
        if (do_sync || !bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8b29cd4f01c89f8213d7dcc43cd1aaf6dd5799c7..d7a8468ddeabc493284d5acc2f81529f9e638461 100644
@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
        md_write_end(r10_bio->mddev);
 }
 
+static void one_write_done(r10bio_t *r10_bio)
+{
+       if (atomic_dec_and_test(&r10_bio->remaining)) {
+               if (test_bit(R10BIO_WriteError, &r10_bio->state))
+                       reschedule_retry(r10_bio);
+               else {
+                       close_write(r10_bio);
+                       if (test_bit(R10BIO_MadeGood, &r10_bio->state))
+                               reschedule_retry(r10_bio);
+                       else
+                               raid_end_bio_io(r10_bio);
+               }
+       }
+}
+
 static void raid10_end_write_request(struct bio *bio, int error)
 {
        int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
         * Let's see if all mirrored write operations have finished
         * already.
         */
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               if (test_bit(R10BIO_WriteError, &r10_bio->state))
-                       reschedule_retry(r10_bio);
-               else {
-                       close_write(r10_bio);
-                       if (test_bit(R10BIO_MadeGood, &r10_bio->state))
-                               reschedule_retry(r10_bio);
-                       else
-                               raid_end_bio_io(r10_bio);
-               }
-       }
+       one_write_done(r10_bio);
        if (dec_rdev)
                rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
 }
@@ -1127,20 +1132,12 @@ retry_write:
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }
 
-       if (atomic_dec_and_test(&r10_bio->remaining)) {
-               /* This matches the end of raid10_end_write_request() */
-               bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
-                               r10_bio->sectors,
-                               !test_bit(R10BIO_Degraded, &r10_bio->state),
-                               0);
-               md_write_end(mddev);
-               raid_end_bio_io(r10_bio);
-       }
-
-       /* In case raid10d snuck in to freeze_array */
-       wake_up(&conf->wait_barrier);
+       /* Don't remove the bias on 'remaining' (one_write_done) until
+        * after checking if we need to go around again.
+        */
 
        if (sectors_handled < (bio->bi_size >> 9)) {
+               one_write_done(r10_bio);
                /* We need another r10_bio.  It has already been counted
                 * in bio->bi_phys_segments.
                 */
@@ -1154,6 +1151,10 @@ retry_write:
                r10_bio->state = 0;
                goto retry_write;
        }
+       one_write_done(r10_bio);
+
+       /* In case raid10d snuck in to freeze_array */
+       wake_up(&conf->wait_barrier);
 
        if (do_sync || !mddev->bitmap || !plugged)
                md_wakeup_thread(mddev->thread);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 91a0a7460ebbe855d4a7753cf89db54432b35545..b27b94078c21888bd472346e92e04fcb8a87464e 100644
@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
                if (mrq->done)
                        mrq->done(mrq);
 
-               mmc_host_clk_gate(host);
+               mmc_host_clk_release(host);
        }
 }
 
@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
                        mrq->stop->mrq = mrq;
                }
        }
-       mmc_host_clk_ungate(host);
+       mmc_host_clk_hold(host);
        led_trigger_event(host->led, LED_FULL);
        host->ops->request(host, mrq);
 }
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
  */
 void mmc_set_chip_select(struct mmc_host *host, int mode)
 {
+       mmc_host_clk_hold(host);
        host->ios.chip_select = mode;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
  * Sets the host clock to the highest possible frequency that
  * is below "hz".
  */
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
 {
        WARN_ON(hz < host->f_min);
 
@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
        mmc_set_ios(host);
 }
 
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+       mmc_host_clk_hold(host);
+       __mmc_set_clock(host, hz);
+       mmc_host_clk_release(host);
+}
+
 #ifdef CONFIG_MMC_CLKGATE
 /*
  * This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
        if (host->clk_old) {
                BUG_ON(host->ios.clock);
                /* This call will also set host->clk_gated to false */
-               mmc_set_clock(host, host->clk_old);
+               __mmc_set_clock(host, host->clk_old);
        }
 }
 
@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
  */
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 {
+       mmc_host_clk_hold(host);
        host->ios.bus_mode = mode;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
  */
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
 {
+       mmc_host_clk_hold(host);
        host->ios.bus_width = width;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
 
                ocr &= 3 << bit;
 
+               mmc_host_clk_hold(host);
                host->ios.vdd = bit;
                mmc_set_ios(host);
+               mmc_host_clk_release(host);
        } else {
                pr_warning("%s: host doesn't support card's voltages\n",
                                mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
  */
 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 {
+       mmc_host_clk_hold(host);
        host->ios.timing = timing;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
  */
 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
 {
+       mmc_host_clk_hold(host);
        host->ios.drv_type = drv_type;
        mmc_set_ios(host);
+       mmc_host_clk_release(host);
 }
 
 /*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
 {
        int bit;
 
+       mmc_host_clk_hold(host);
+
        /* If ocr is set, we use it */
        if (host->ocr)
                bit = ffs(host->ocr) - 1;
@@ -1126,10 +1147,14 @@ static void mmc_power_up(struct mmc_host *host)
         * time required to reach a stable voltage.
         */
        mmc_delay(10);
+
+       mmc_host_clk_release(host);
 }
 
 static void mmc_power_off(struct mmc_host *host)
 {
+       mmc_host_clk_hold(host);
+
        host->ios.clock = 0;
        host->ios.vdd = 0;
 
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;
        mmc_set_ios(host);
+
+       mmc_host_clk_release(host);
 }
 
 /*
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index b29d3e8fd3a2ad713525c4be82c9b2f9699910ec..793d0a0dad8d74d85d239da2c3b107e47a5d1b4e 100644
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
 }
 
 /**
- *     mmc_host_clk_ungate - ungate hardware MCI clocks
+ *     mmc_host_clk_hold - ungate hardware MCI clocks
  *     @host: host to ungate.
  *
  *     Makes sure the host ios.clock is restored to a non-zero value
  *     past this call. Increase clock reference count and ungate clock
  *     if we're the first user.
  */
-void mmc_host_clk_ungate(struct mmc_host *host)
+void mmc_host_clk_hold(struct mmc_host *host)
 {
        unsigned long flags;
 
@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
 }
 
 /**
- *     mmc_host_clk_gate - gate off hardware MCI clocks
+ *     mmc_host_clk_release - gate off hardware MCI clocks
  *     @host: host to gate.
  *
  *     Calls the host driver with ios.clock set to zero as often as possible
  *     in order to gate off hardware MCI clocks. Decrease clock reference
  *     count and schedule disabling of clock.
  */
-void mmc_host_clk_gate(struct mmc_host *host)
+void mmc_host_clk_release(struct mmc_host *host)
 {
        unsigned long flags;
 
@@ -179,7 +179,7 @@ void mmc_host_clk_gate(struct mmc_host *host)
        host->clk_requests--;
        if (mmc_host_may_gate_card(host->card) &&
            !host->clk_requests)
-               schedule_work(&host->clk_gate_work);
+               queue_work(system_nrt_wq, &host->clk_gate_work);
        spin_unlock_irqrestore(&host->clk_lock, flags);
 }
 
@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
        if (cancel_work_sync(&host->clk_gate_work))
                mmc_host_clk_gate_delayed(host);
        if (host->clk_gated)
-               mmc_host_clk_ungate(host);
+               mmc_host_clk_hold(host);
        /* There should be only one user now */
        WARN_ON(host->clk_requests > 1);
 }
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index de199f91192851b0d4f3162a6dcbe78199fa9e59..fb8a5cd2e4a1e87bfab9439f8c9e44a3d75ee4b4 100644
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
 #ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_ungate(struct mmc_host *host);
-void mmc_host_clk_gate(struct mmc_host *host);
+void mmc_host_clk_hold(struct mmc_host *host);
+void mmc_host_clk_release(struct mmc_host *host);
 unsigned int mmc_host_clk_rate(struct mmc_host *host);
 
 #else
-static inline void mmc_host_clk_ungate(struct mmc_host *host)
+static inline void mmc_host_clk_hold(struct mmc_host *host)
 {
 }
 
-static inline void mmc_host_clk_gate(struct mmc_host *host)
+static inline void mmc_host_clk_release(struct mmc_host *host)
 {
 }
 
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 633975ff2bb395f6e2e883c2e7ce05fb51ed8df2..0370e03e314253f027a36aa994591ea2817f0826 100644
@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
        return 0;
 }
 
-static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+static void sd_update_bus_speed_mode(struct mmc_card *card)
 {
-       unsigned int bus_speed = 0, timing = 0;
-       int err;
-
        /*
         * If the host doesn't support any of the UHS-I modes, fallback on
         * default speed.
         */
        if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
-           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
-               return 0;
+           MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
+               card->sd_bus_speed = 0;
+               return;
+       }
 
        if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
            (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
-                       bus_speed = UHS_SDR104_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR104;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
        } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
                   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
-                       bus_speed = UHS_DDR50_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_DDR50;
-                       card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+                       card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
                    SD_MODE_UHS_SDR50)) {
-                       bus_speed = UHS_SDR50_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR50;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
                   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
-                       bus_speed = UHS_SDR25_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR25;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
        } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
                    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
                    MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
                    SD_MODE_UHS_SDR12)) {
-                       bus_speed = UHS_SDR12_BUS_SPEED;
-                       timing = MMC_TIMING_UHS_SDR12;
-                       card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+                       card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+       }
+}
+
+static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
+{
+       int err;
+       unsigned int timing = 0;
+
+       switch (card->sd_bus_speed) {
+       case UHS_SDR104_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR104;
+               card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+               break;
+       case UHS_DDR50_BUS_SPEED:
+               timing = MMC_TIMING_UHS_DDR50;
+               card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+               break;
+       case UHS_SDR50_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR50;
+               card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+               break;
+       case UHS_SDR25_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR25;
+               card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+               break;
+       case UHS_SDR12_BUS_SPEED:
+               timing = MMC_TIMING_UHS_SDR12;
+               card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+               break;
+       default:
+               return 0;
        }
 
-       card->sd_bus_speed = bus_speed;
-       err = mmc_sd_switch(card, 1, 0, bus_speed, status);
+       err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
        if (err)
                return err;
 
-       if ((status[16] & 0xF) != bus_speed)
+       if ((status[16] & 0xF) != card->sd_bus_speed)
                printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
                        mmc_hostname(card->host));
        else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
                mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
        }
 
+       /*
+        * Select the bus speed mode depending on host
+        * and card capability.
+        */
+       sd_update_bus_speed_mode(card);
+
        /* Set the driver strength for the card */
        err = sd_select_driver_type(card, status);
        if (err)
                goto out;
 
-       /* Set bus speed mode of the card */
-       err = sd_set_bus_speed_mode(card, status);
+       /* Set current limit for the card */
+       err = sd_set_current_limit(card, status);
        if (err)
                goto out;
 
-       /* Set current limit for the card */
-       err = sd_set_current_limit(card, status);
+       /* Set bus speed mode of the card */
+       err = sd_set_bus_speed_mode(card, status);
        if (err)
                goto out;
 
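Side note: the split above separates "pick the bus speed both sides support" (sd_update_bus_speed_mode) from "program it" (sd_set_bus_speed_mode), and the reorder further down sets the current limit before switching the speed. A simplified userspace sketch of the selection step; it just intersects the two masks, whereas the kernel cascade also lets a faster host capability satisfy a slower card mode, and the bit values here are made up, not the MMC_CAP_UHS_* / SD_MODE_UHS_* definitions:

#include <stdio.h>

/* Hypothetical capability bits, kept in fastest-to-slowest order. */
enum {
	SPEED_SDR104 = 1 << 4,
	SPEED_DDR50  = 1 << 3,
	SPEED_SDR50  = 1 << 2,
	SPEED_SDR25  = 1 << 1,
	SPEED_SDR12  = 1 << 0,
};

static const struct {
	unsigned int bit;
	const char *name;
} speeds[] = {
	{ SPEED_SDR104, "SDR104" },
	{ SPEED_DDR50,  "DDR50"  },
	{ SPEED_SDR50,  "SDR50"  },
	{ SPEED_SDR25,  "SDR25"  },
	{ SPEED_SDR12,  "SDR12"  },
};

/* Pick the fastest mode advertised by both the host and the card;
 * fall back to default speed when the masks do not overlap. */
static const char *pick_bus_speed(unsigned int host_caps, unsigned int card_modes)
{
	unsigned int both = host_caps & card_modes;
	unsigned int i;

	for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
		if (both & speeds[i].bit)
			return speeds[i].name;
	return "default speed";
}

int main(void)
{
	unsigned int host = SPEED_SDR50 | SPEED_SDR25 | SPEED_SDR12;
	unsigned int card = SPEED_SDR104 | SPEED_SDR50;

	printf("selected: %s\n", pick_bus_speed(host, card)); /* SDR50 */
	return 0;
}
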
index 0e9780f5a4a9d8147c9eaa9d1524fd6ebbc59331..4dc0028086a34357ef5b5578678ce7ca4a1ccdd6 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/gpio.h>
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
index 2bd7bf4fece75715ed275673b1eed6bf888ba862..fe886d6c474a41718fb4b388c90c14ad26bf823e 100644 (file)
@@ -302,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
                ctrl &= ~SDHCI_CTRL_8BITBUS;
                break;
        default:
+               ctrl &= ~SDHCI_CTRL_4BITBUS;
+               ctrl &= ~SDHCI_CTRL_8BITBUS;
                break;
        }
 
index 774f6439d7ce06be72eee93ee1896e0e32fab9ea..0c4a672f5db618bd68c96b4bb7a6239bdf7f12aa 100644 (file)
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
        mmc_data->hclk = clk_get_rate(priv->clk);
        mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
        mmc_data->get_cd = sh_mobile_sdhi_get_cd;
-       if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
-               mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
        mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
        if (p) {
                mmc_data->flags = p->tmio_flags;
+               if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
+                       mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
                mmc_data->ocr_mask = p->tmio_ocr_mask;
                mmc_data->capabilities |= p->tmio_caps;
 
index 65b5b76cc3794102af8229bd239499829705f61a..64fbb002182518310e56693cc88b294465cc2383 100644 (file)
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
 
 #define ubi_dbg_msg(fmt, ...) do {                                           \
        if (0)                                                               \
-               pr_debug(fmt "\n", ##__VA_ARGS__);                           \
+               printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__);                  \
 } while (0)
 
 #define dbg_msg(fmt, ...)  ubi_dbg_msg(fmt, ##__VA_ARGS__)
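Side note: the "if (0)" pattern above keeps format-string checking at compile time while generating no code; switching from pr_debug() back to plain printk is presumably meant to avoid creating dynamic-debug control entries for a statement that can never run. A self-contained userspace equivalent of the macro (it uses the GNU ##__VA_ARGS__ extension, as the kernel does):

#include <stdio.h>

/*
 * Debug macro that compiles to nothing but still lets the compiler
 * type-check the format string and its arguments, the same trick as
 * the "if (0) printk(...)" in the hunk above.
 */
#define dbg_msg(fmt, ...) do {                            \
	if (0)                                            \
		fprintf(stderr, fmt "\n", ##__VA_ARGS__); \
} while (0)

int main(void)
{
	int leb = 42;

	/* No output and no runtime cost, but a wrong specifier such as
	 * %s here would still trigger a -Wformat warning. */
	dbg_msg("erasing LEB %d", leb);

	return 0;
}
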
index 52fe21e1e2cd11b20f50d3c59c7672818ac485bc..3b1416e3d217a51ca0c53f05816994073a49ed33 100644 (file)
@@ -308,8 +308,11 @@ static void am79c961_timer(unsigned long data)
        struct net_device *dev = (struct net_device *)data;
        struct dev_priv *priv = netdev_priv(dev);
        unsigned int lnkstat, carrier;
+       unsigned long flags;
 
+       spin_lock_irqsave(&priv->chip_lock, flags);
        lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
+       spin_unlock_irqrestore(&priv->chip_lock, flags);
        carrier = netif_carrier_ok(dev);
 
        if (lnkstat && !carrier) {
index 753b21aaea6196553fd0f3a541e2115e9b95e479..3ffd9c1acc0a33e0146f992fccc695444799b726 100644 (file)
@@ -169,7 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
                        (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
                return;
 
-       pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss);
+       if (dev->bus && dev->bus->self)
+               pcie_bus_configure_settings(dev->bus,
+                                           dev->bus->self->pcie_mpss);
 
        memset(&hpp, 0, sizeof(hpp));
        ret = pci_get_hp_params(dev, &hpp);
index 0ce67423a0a31ce1c725d5f8c52bd3811baaa253..4e84fd4a4312c2d4660bff0992aa21b7fe67e5a1 100644 (file)
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
 unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
 
-enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
 
 /*
  * The default CLS is used if arch didn't set CLS explicitly and not
index 8473727b29fabaaa743b3d02ee771822175d02b4..b1187ff31d89940ba03ff18b2d2b2c1974808de7 100644 (file)
@@ -1396,34 +1396,37 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)
 
 static void pcie_write_mrrs(struct pci_dev *dev, int mps)
 {
-       int rc, mrrs;
+       int rc, mrrs, dev_mpss;
 
-       if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
-               int dev_mpss = 128 << dev->pcie_mpss;
+       /* In the "safe" case, do not configure the MRRS.  There appear to be
+        * issues with setting MRRS to 0 on a number of devices.
+        */
 
-               /* For Max performance, the MRRS must be set to the largest
-                * supported value.  However, it cannot be configured larger
-                * than the MPS the device or the bus can support.  This assumes
-                * that the largest MRRS available on the device cannot be
-                * smaller than the device MPSS.
-                */
-               mrrs = mps < dev_mpss ? mps : dev_mpss;
-       } else
-               /* In the "safe" case, configure the MRRS for fairness on the
-                * bus by making all devices have the same size
-                */
-               mrrs = mps;
+       if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
+               return;
 
+       dev_mpss = 128 << dev->pcie_mpss;
+
+       /* For Max performance, the MRRS must be set to the largest supported
+        * value.  However, it cannot be configured larger than the MPS the
+        * device or the bus can support.  This assumes that the largest MRRS
+        * available on the device cannot be smaller than the device MPSS.
+        */
+       mrrs = min(mps, dev_mpss);
 
        /* MRRS is a R/W register.  Invalid values can be written, but a
-        * subsiquent read will verify if the value is acceptable or not.
+        * subsequent read will verify if the value is acceptable or not.
         * If the MRRS value provided is not acceptable (e.g., too large),
         * shrink the value until it is acceptable to the HW.
         */
        while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
+               dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
+                        " to %d.  If any issues are encountered, please try "
+                        "running with pci=pcie_bus_safe\n", mrrs);
                rc = pcie_set_readrq(dev, mrrs);
                if (rc)
-                       dev_err(&dev->dev, "Failed attempting to set the MRRS\n");
+                       dev_err(&dev->dev,
+                               "Failed attempting to set the MRRS\n");
 
                mrrs /= 2;
        }
@@ -1436,13 +1439,13 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
        if (!pci_is_pcie(dev))
                return 0;
 
-       dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+       dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
                 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
 
        pcie_write_mps(dev, mps);
        pcie_write_mrrs(dev, mps);
 
-       dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
+       dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
                 pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));
 
        return 0;
@@ -1456,9 +1459,6 @@ void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
 {
        u8 smpss = mpss;
 
-       if (!bus->self)
-               return;
-
        if (!pci_is_pcie(bus->self))
                return;
 
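Side note: the rework above only touches MRRS in "performance" mode and keeps the write-then-verify loop that shrinks the requested size until the hardware accepts it. Below is a simplified userspace model of that intent (write, read back, halve on mismatch), with a pretend device standing in for pcie_set_readrq()/pcie_get_readrq(); it is not a line-for-line copy of the kernel loop:

#include <stdio.h>

/* Pretend device: it latches only read-request sizes it supports. */
static int dev_readrq = 128;            /* value currently "programmed" */
static const int dev_max_readrq = 512;  /* largest size this device accepts */

static void set_readrq(int rq)
{
	if (rq <= dev_max_readrq)
		dev_readrq = rq;         /* invalid writes are simply ignored */
}

static int get_readrq(void)
{
	return dev_readrq;
}

int main(void)
{
	int mrrs = 4096;                 /* start from the size we would like */

	/* Write, verify by reading back, and halve on mismatch until the
	 * device accepts the value or we reach the 128-byte floor. */
	while (mrrs != get_readrq() && mrrs >= 128) {
		printf("trying MRRS=%d\n", mrrs);
		set_readrq(mrrs);
		if (mrrs != get_readrq())
			mrrs /= 2;
	}

	printf("final MRRS=%d\n", mrrs);   /* 512 with the values above */
	return 0;
}
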
index 335551d333b24f10a428c4f332792190181ec82f..14a42a1edc66d55be04a79c67aa622db7b10b38d 100644 (file)
@@ -36,6 +36,7 @@
  */
 struct ep93xx_rtc {
        void __iomem    *mmio_base;
+       struct rtc_device *rtc;
 };
 
 static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
 {
        struct ep93xx_rtc *ep93xx_rtc;
        struct resource *res;
-       struct rtc_device *rtc;
        int err;
 
        ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
                return -ENXIO;
 
        pdev->dev.platform_data = ep93xx_rtc;
-       platform_set_drvdata(pdev, rtc);
+       platform_set_drvdata(pdev, ep93xx_rtc);
 
-       rtc = rtc_device_register(pdev->name,
+       ep93xx_rtc->rtc = rtc_device_register(pdev->name,
                                &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
-       if (IS_ERR(rtc)) {
-               err = PTR_ERR(rtc);
+       if (IS_ERR(ep93xx_rtc->rtc)) {
+               err = PTR_ERR(ep93xx_rtc->rtc);
                goto exit;
        }
 
@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
        return 0;
 
 fail:
-       rtc_device_unregister(rtc);
+       rtc_device_unregister(ep93xx_rtc->rtc);
 exit:
        platform_set_drvdata(pdev, NULL);
        pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:
 
 static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
 {
-       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
 
        sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
        platform_set_drvdata(pdev, NULL);
-       rtc_device_unregister(rtc);
+       rtc_device_unregister(ep93xx_rtc->rtc);
        pdev->dev.platform_data = NULL;
 
        return 0;
index 075f1708deae844499b8de25e2b0c523846b9299..c4cf05731118a245cff79181ba81eac90ca92be6 100644 (file)
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
        time -= tm->tm_hour * 3600;
        tm->tm_min = time / 60;
        tm->tm_sec = time - tm->tm_min * 60;
+
+       tm->tm_isdst = 0;
 }
 EXPORT_SYMBOL(rtc_time_to_tm);
 
index 9a81f778d6b22216469b641d0dbe44c809667fd8..20687d55e7a72d5eb2871fd8504d6a2fa457e9e5 100644 (file)
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
        int res;
        u8 rd_reg;
 
-#ifdef CONFIG_LOCKDEP
-       /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
-        * we don't want and can't tolerate.  Although it might be
-        * friendlier not to borrow this thread context...
-        */
-       local_irq_enable();
-#endif
-
        res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
        if (res)
                goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
 static int __devinit twl_rtc_probe(struct platform_device *pdev)
 {
        struct rtc_device *rtc;
-       int ret = 0;
+       int ret = -EINVAL;
        int irq = platform_get_irq(pdev, 0);
        u8 rd_reg;
 
        if (irq <= 0)
-               return -EINVAL;
-
-       rtc = rtc_device_register(pdev->name,
-                                 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
-       if (IS_ERR(rtc)) {
-               ret = PTR_ERR(rtc);
-               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
-                       PTR_ERR(rtc));
-               goto out0;
-
-       }
-
-       platform_set_drvdata(pdev, rtc);
+               goto out1;
 
        ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
        if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        if (ret < 0)
                goto out1;
 
-       ret = request_irq(irq, twl_rtc_interrupt,
-                               IRQF_TRIGGER_RISING,
-                               dev_name(&rtc->dev), rtc);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "IRQ is not free.\n");
-               goto out1;
-       }
-
        if (twl_class_is_6030()) {
                twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
                        REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
        /* Check RTC module status, Enable if it is off */
        ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
        if (ret < 0)
-               goto out2;
+               goto out1;
 
        if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
                dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
                rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
                ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
                if (ret < 0)
-                       goto out2;
+                       goto out1;
        }
 
        /* init cached IRQ enable bits */
        ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
        if (ret < 0)
+               goto out1;
+
+       rtc = rtc_device_register(pdev->name,
+                                 &pdev->dev, &twl_rtc_ops, THIS_MODULE);
+       if (IS_ERR(rtc)) {
+               ret = PTR_ERR(rtc);
+               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
+                       PTR_ERR(rtc));
+               goto out1;
+       }
+
+       ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
+                                  IRQF_TRIGGER_RISING,
+                                  dev_name(&rtc->dev), rtc);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "IRQ is not free.\n");
                goto out2;
+       }
 
-       return ret;
+       platform_set_drvdata(pdev, rtc);
+       return 0;
 
 out2:
-       free_irq(irq, rtc);
-out1:
        rtc_device_unregister(rtc);
-out0:
+out1:
        return ret;
 }
 
index 2c33ce6eac1ee57ed2fbcf26832fa25835502340..0f5599e0abf6a6d362e0f94312faaa7abe3ea2a7 100644 (file)
@@ -1,6 +1,6 @@
 config SCSI_QLA_ISCSI
        tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
-       depends on PCI && SCSI
+       depends on PCI && SCSI && NET
        select SCSI_ISCSI_ATTRS
        ---help---
        This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
index 80d292fb92d8a3c10825130867dbdc55b586f939..7363c1b169e8f5719e5ba9bc1dee3542de468169 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/backlight.h>
 #endif
 
-static const char const *backlight_types[] = {
+static const char *const backlight_types[] = {
        [BACKLIGHT_RAW] = "raw",
        [BACKLIGHT_PLATFORM] = "platform",
        [BACKLIGHT_FIRMWARE] = "firmware",
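Side note: the one-character move above matters. In the old declaration both const qualifiers applied to the pointed-to characters, so the table of pointers itself remained writable; "*const" additionally makes each pointer read-only, letting the whole array live in read-only data. A quick standalone illustration (compilers may warn about the duplicated qualifier in the first form):

#include <stdio.h>

/* Both 'const's qualify the char: the strings are read-only, but the
 * pointer slots in the table can still be overwritten. */
static const char const *writable_table[] = { "raw", "platform" };

/* '*const' also makes each pointer read-only. */
static const char *const fixed_table[] = { "raw", "platform" };

int main(void)
{
	writable_table[0] = "oops";        /* compiles: elements are not const */
	/* fixed_table[0] = "oops"; */     /* would be a compile-time error */
	printf("%s %s\n", writable_table[0], fixed_table[0]);
	return 0;
}
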
index 46ce357ca1abd46d5bff175aa5518b9e1f7f778f..410ffd6ceb5fb10961d9230e056cd2fe87bef148 100644 (file)
@@ -54,9 +54,9 @@ extern struct kmem_cache *v9fs_inode_cache;
 
 struct inode *v9fs_alloc_inode(struct super_block *sb);
 void v9fs_destroy_inode(struct inode *inode);
-struct inode *v9fs_get_inode(struct super_block *sb, int mode);
+struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t);
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
-                   struct inode *inode, int mode);
+                   struct inode *inode, int mode, dev_t);
 void v9fs_evict_inode(struct inode *inode);
 ino_t v9fs_qid2ino(struct p9_qid *qid);
 void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
@@ -83,4 +83,6 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
        v9inode->cache_validity |= V9FS_INO_INVALID_ATTR;
        return;
 }
+
+int v9fs_open_to_dotl_flags(int flags);
 #endif
index 3c173fcc2c5a0902be016dd20e6a48411f5d3a55..62857a810a79d00332a150c6a02a433e638d72b8 100644 (file)
@@ -65,7 +65,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
        v9inode = V9FS_I(inode);
        v9ses = v9fs_inode2v9ses(inode);
        if (v9fs_proto_dotl(v9ses))
-               omode = file->f_flags;
+               omode = v9fs_open_to_dotl_flags(file->f_flags);
        else
                omode = v9fs_uflags2omode(file->f_flags,
                                        v9fs_proto_dotu(v9ses));
@@ -169,7 +169,18 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
 
        /* convert posix lock to p9 tlock args */
        memset(&flock, 0, sizeof(flock));
-       flock.type = fl->fl_type;
+       /* map the lock type */
+       switch (fl->fl_type) {
+       case F_RDLCK:
+               flock.type = P9_LOCK_TYPE_RDLCK;
+               break;
+       case F_WRLCK:
+               flock.type = P9_LOCK_TYPE_WRLCK;
+               break;
+       case F_UNLCK:
+               flock.type = P9_LOCK_TYPE_UNLCK;
+               break;
+       }
        flock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                flock.length = 0;
@@ -245,7 +256,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
 
        /* convert posix lock to p9 tgetlock args */
        memset(&glock, 0, sizeof(glock));
-       glock.type = fl->fl_type;
+       glock.type  = P9_LOCK_TYPE_UNLCK;
        glock.start = fl->fl_start;
        if (fl->fl_end == OFFSET_MAX)
                glock.length = 0;
@@ -257,17 +268,26 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
        res = p9_client_getlock_dotl(fid, &glock);
        if (res < 0)
                return res;
-       if (glock.type != F_UNLCK) {
-               fl->fl_type = glock.type;
+       /* map 9p lock type to os lock type */
+       switch (glock.type) {
+       case P9_LOCK_TYPE_RDLCK:
+               fl->fl_type = F_RDLCK;
+               break;
+       case P9_LOCK_TYPE_WRLCK:
+               fl->fl_type = F_WRLCK;
+               break;
+       case P9_LOCK_TYPE_UNLCK:
+               fl->fl_type = F_UNLCK;
+               break;
+       }
+       if (glock.type != P9_LOCK_TYPE_UNLCK) {
                fl->fl_start = glock.start;
                if (glock.length == 0)
                        fl->fl_end = OFFSET_MAX;
                else
                        fl->fl_end = glock.start + glock.length - 1;
                fl->fl_pid = glock.proc_id;
-       } else
-               fl->fl_type = F_UNLCK;
-
+       }
        return res;
 }
 
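Side note: the switches above stop passing the VFS lock constants (F_RDLCK/F_WRLCK/F_UNLCK) straight onto the wire; the local values are not guaranteed to match the 9p2000.L encoding, while the protocol values (0/1/2, defined later in include/net/9p/9p.h) are fixed. A standalone sketch of the two mappings:

#include <fcntl.h>
#include <stdio.h>

/* Wire values from the 9p2000.L protocol, as added in this series. */
#define P9_LOCK_TYPE_RDLCK 0
#define P9_LOCK_TYPE_WRLCK 1
#define P9_LOCK_TYPE_UNLCK 2

/* Translate a local fcntl lock type to the protocol value. */
static int posix_to_p9(short fl_type)
{
	switch (fl_type) {
	case F_RDLCK: return P9_LOCK_TYPE_RDLCK;
	case F_WRLCK: return P9_LOCK_TYPE_WRLCK;
	default:      return P9_LOCK_TYPE_UNLCK;
	}
}

/* ...and back, for replies such as TGETLOCK results. */
static short p9_to_posix(int wire_type)
{
	switch (wire_type) {
	case P9_LOCK_TYPE_RDLCK: return F_RDLCK;
	case P9_LOCK_TYPE_WRLCK: return F_WRLCK;
	default:                 return F_UNLCK;
	}
}

int main(void)
{
	printf("F_WRLCK -> wire %d -> back %d (F_WRLCK=%d)\n",
	       posix_to_p9(F_WRLCK),
	       (int)p9_to_posix(posix_to_p9(F_WRLCK)), F_WRLCK);
	return 0;
}
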
index 8bb5507e822f4151574f564150f48acd4531f002..e3c03db3c788149b83cd629468d782783f99e15b 100644 (file)
@@ -95,15 +95,18 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
 /**
  * p9mode2unixmode- convert plan9 mode bits to unix mode bits
  * @v9ses: v9fs session information
- * @mode: mode to convert
+ * @stat: p9_wstat from which mode need to be derived
+ * @rdev: major number, minor number in case of device files.
  *
  */
-
-static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
+static int p9mode2unixmode(struct v9fs_session_info *v9ses,
+                          struct p9_wstat *stat, dev_t *rdev)
 {
        int res;
+       int mode = stat->mode;
 
-       res = mode & 0777;
+       res = mode & S_IALLUGO;
+       *rdev = 0;
 
        if ((mode & P9_DMDIR) == P9_DMDIR)
                res |= S_IFDIR;
@@ -116,9 +119,26 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
                 && (v9ses->nodev == 0))
                res |= S_IFIFO;
        else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses))
-                && (v9ses->nodev == 0))
-               res |= S_IFBLK;
-       else
+                && (v9ses->nodev == 0)) {
+               char type = 0, ext[32];
+               int major = -1, minor = -1;
+
+               strncpy(ext, stat->extension, sizeof(ext));
+               sscanf(ext, "%c %u %u", &type, &major, &minor);
+               switch (type) {
+               case 'c':
+                       res |= S_IFCHR;
+                       break;
+               case 'b':
+                       res |= S_IFBLK;
+                       break;
+               default:
+                       P9_DPRINTK(P9_DEBUG_ERROR,
+                               "Unknown special type %c %s\n", type,
+                               stat->extension);
+               };
+               *rdev = MKDEV(major, minor);
+       } else
                res |= S_IFREG;
 
        if (v9fs_proto_dotu(v9ses)) {
@@ -131,7 +151,6 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
                if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
                        res |= S_ISVTX;
        }
-
        return res;
 }
 
@@ -242,13 +261,13 @@ void v9fs_destroy_inode(struct inode *inode)
 }
 
 int v9fs_init_inode(struct v9fs_session_info *v9ses,
-                   struct inode *inode, int mode)
+                   struct inode *inode, int mode, dev_t rdev)
 {
        int err = 0;
 
        inode_init_owner(inode, NULL, mode);
        inode->i_blocks = 0;
-       inode->i_rdev = 0;
+       inode->i_rdev = rdev;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_mapping->a_ops = &v9fs_addr_operations;
 
@@ -335,7 +354,7 @@ error:
  *
  */
 
-struct inode *v9fs_get_inode(struct super_block *sb, int mode)
+struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev)
 {
        int err;
        struct inode *inode;
@@ -348,7 +367,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
                P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
                return ERR_PTR(-ENOMEM);
        }
-       err = v9fs_init_inode(v9ses, inode, mode);
+       err = v9fs_init_inode(v9ses, inode, mode, rdev);
        if (err) {
                iput(inode);
                return ERR_PTR(err);
@@ -435,11 +454,12 @@ void v9fs_evict_inode(struct inode *inode)
 static int v9fs_test_inode(struct inode *inode, void *data)
 {
        int umode;
+       dev_t rdev;
        struct v9fs_inode *v9inode = V9FS_I(inode);
        struct p9_wstat *st = (struct p9_wstat *)data;
        struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);
 
-       umode = p9mode2unixmode(v9ses, st->mode);
+       umode = p9mode2unixmode(v9ses, st, &rdev);
        /* don't match inode of different type */
        if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
                return 0;
@@ -473,6 +493,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
                                   struct p9_wstat *st,
                                   int new)
 {
+       dev_t rdev;
        int retval, umode;
        unsigned long i_ino;
        struct inode *inode;
@@ -496,8 +517,8 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
         * later.
         */
        inode->i_ino = i_ino;
-       umode = p9mode2unixmode(v9ses, st->mode);
-       retval = v9fs_init_inode(v9ses, inode, umode);
+       umode = p9mode2unixmode(v9ses, st, &rdev);
+       retval = v9fs_init_inode(v9ses, inode, umode, rdev);
        if (retval)
                goto error;
 
@@ -531,6 +552,19 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
        return inode;
 }
 
+/**
+ * v9fs_at_to_dotl_flags- convert Linux specific AT flags to
+ * plan 9 AT flag.
+ * @flags: flags to convert
+ */
+static int v9fs_at_to_dotl_flags(int flags)
+{
+       int rflags = 0;
+       if (flags & AT_REMOVEDIR)
+               rflags |= P9_DOTL_AT_REMOVEDIR;
+       return rflags;
+}
+
 /**
  * v9fs_remove - helper function to remove files and directories
  * @dir: directory inode that is being deleted
@@ -558,7 +592,8 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
                return retval;
        }
        if (v9fs_proto_dotl(v9ses))
-               retval = p9_client_unlinkat(dfid, dentry->d_name.name, flags);
+               retval = p9_client_unlinkat(dfid, dentry->d_name.name,
+                                           v9fs_at_to_dotl_flags(flags));
        if (retval == -EOPNOTSUPP) {
                /* Try the one based on path */
                v9fid = v9fs_fid_clone(dentry);
@@ -645,13 +680,11 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
                P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       d_instantiate(dentry, inode);
        err = v9fs_fid_add(dentry, fid);
        if (err < 0)
                goto error;
-
+       d_instantiate(dentry, inode);
        return ofid;
-
 error:
        if (ofid)
                p9_client_clunk(ofid);
@@ -792,6 +825,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
                                      struct nameidata *nameidata)
 {
+       struct dentry *res;
        struct super_block *sb;
        struct v9fs_session_info *v9ses;
        struct p9_fid *dfid, *fid;
@@ -823,22 +857,35 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 
                return ERR_PTR(result);
        }
-
-       inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+       /*
+        * Make sure we don't use a wrong inode due to parallel
+        * unlink. For cached mode create calls request for new
+        * inode. But with cache disabled, lookup should do this.
+        */
+       if (v9ses->cache)
+               inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+       else
+               inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
        if (IS_ERR(inode)) {
                result = PTR_ERR(inode);
                inode = NULL;
                goto error;
        }
-
        result = v9fs_fid_add(dentry, fid);
        if (result < 0)
                goto error_iput;
-
 inst_out:
-       d_add(dentry, inode);
-       return NULL;
-
+       /*
+        * If we had a rename on the server and a parallel lookup
+        * for the new name, then make sure we instantiate with
+        * the new name. ie look up for a/b, while on server somebody
+        * moved b under k and client parallely did a lookup for
+        * k/b.
+        */
+       res = d_materialise_unique(dentry, inode);
+       if (!IS_ERR(res))
+               return res;
+       result = PTR_ERR(res);
 error_iput:
        iput(inode);
 error:
@@ -1002,7 +1049,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
                return PTR_ERR(st);
 
        v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
-               generic_fillattr(dentry->d_inode, stat);
+       generic_fillattr(dentry->d_inode, stat);
 
        p9stat_free(st);
        kfree(st);
@@ -1086,6 +1133,7 @@ void
 v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
        struct super_block *sb)
 {
+       mode_t mode;
        char ext[32];
        char tag_name[14];
        unsigned int i_nlink;
@@ -1121,31 +1169,9 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
                                inode->i_nlink = i_nlink;
                }
        }
-       inode->i_mode = p9mode2unixmode(v9ses, stat->mode);
-       if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) {
-               char type = 0;
-               int major = -1;
-               int minor = -1;
-
-               strncpy(ext, stat->extension, sizeof(ext));
-               sscanf(ext, "%c %u %u", &type, &major, &minor);
-               switch (type) {
-               case 'c':
-                       inode->i_mode &= ~S_IFBLK;
-                       inode->i_mode |= S_IFCHR;
-                       break;
-               case 'b':
-                       break;
-               default:
-                       P9_DPRINTK(P9_DEBUG_ERROR,
-                               "Unknown special type %c %s\n", type,
-                               stat->extension);
-               };
-               inode->i_rdev = MKDEV(major, minor);
-               init_special_inode(inode, inode->i_mode, inode->i_rdev);
-       } else
-               inode->i_rdev = 0;
-
+       mode = stat->mode & S_IALLUGO;
+       mode |= inode->i_mode & ~S_IALLUGO;
+       inode->i_mode = mode;
        i_size_write(inode, stat->length);
 
        /* not real number of blocks, but 512 byte ones ... */
@@ -1411,6 +1437,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
 
 int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
 {
+       int umode;
+       dev_t rdev;
        loff_t i_size;
        struct p9_wstat *st;
        struct v9fs_session_info *v9ses;
@@ -1419,6 +1447,12 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
        st = p9_client_stat(fid);
        if (IS_ERR(st))
                return PTR_ERR(st);
+       /*
+        * Don't update inode if the file type is different
+        */
+       umode = p9mode2unixmode(v9ses, st, &rdev);
+       if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
+               goto out;
 
        spin_lock(&inode->i_lock);
        /*
@@ -1430,6 +1464,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
        if (v9ses->cache)
                inode->i_size = i_size;
        spin_unlock(&inode->i_lock);
+out:
        p9stat_free(st);
        kfree(st);
        return 0;
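Side note: one of the hunks above moves the "c major minor" / "b major minor" extension parsing into p9mode2unixmode() so the device number is already known when the inode is first initialised. A small userspace sketch of that parsing step; the kernel uses MKDEV(), here a stand-in macro is used for illustration:

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Stand-in for the kernel's MKDEV(); illustration only. */
#define FAKE_MKDEV(maj, min) (((maj) << 8) | (min))

/* Parse a 9p "extension" string describing a special file, e.g. "c 4 64"
 * for a character device or "b 8 0" for a block device. */
static int parse_extension(const char *extension, mode_t *type, unsigned int *rdev)
{
	char ext[32];
	char kind = 0;
	unsigned int major = 0, minor = 0;

	strncpy(ext, extension, sizeof(ext) - 1);
	ext[sizeof(ext) - 1] = '\0';
	if (sscanf(ext, "%c %u %u", &kind, &major, &minor) != 3)
		return -1;

	switch (kind) {
	case 'c':
		*type = S_IFCHR;
		break;
	case 'b':
		*type = S_IFBLK;
		break;
	default:
		return -1;                 /* unknown special type */
	}
	*rdev = FAKE_MKDEV(major, minor);
	return 0;
}

int main(void)
{
	mode_t type;
	unsigned int rdev;

	if (parse_extension("c 4 64", &type, &rdev) == 0)
		printf("char device? %d  rdev=0x%x\n", S_ISCHR(type), rdev);
	return 0;
}
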
index b6c8ed205192e5ab34f1b8ae15323dcc01fdec34..aded79fcd5cfdadc359929f3b39fad1f92ff2001 100644 (file)
@@ -153,7 +153,8 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
         * later.
         */
        inode->i_ino = i_ino;
-       retval = v9fs_init_inode(v9ses, inode, st->st_mode);
+       retval = v9fs_init_inode(v9ses, inode,
+                                st->st_mode, new_decode_dev(st->st_rdev));
        if (retval)
                goto error;
 
@@ -190,6 +191,58 @@ v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
        return inode;
 }
 
+struct dotl_openflag_map {
+       int open_flag;
+       int dotl_flag;
+};
+
+static int v9fs_mapped_dotl_flags(int flags)
+{
+       int i;
+       int rflags = 0;
+       struct dotl_openflag_map dotl_oflag_map[] = {
+               { O_CREAT,      P9_DOTL_CREATE },
+               { O_EXCL,       P9_DOTL_EXCL },
+               { O_NOCTTY,     P9_DOTL_NOCTTY },
+               { O_TRUNC,      P9_DOTL_TRUNC },
+               { O_APPEND,     P9_DOTL_APPEND },
+               { O_NONBLOCK,   P9_DOTL_NONBLOCK },
+               { O_DSYNC,      P9_DOTL_DSYNC },
+               { FASYNC,       P9_DOTL_FASYNC },
+               { O_DIRECT,     P9_DOTL_DIRECT },
+               { O_LARGEFILE,  P9_DOTL_LARGEFILE },
+               { O_DIRECTORY,  P9_DOTL_DIRECTORY },
+               { O_NOFOLLOW,   P9_DOTL_NOFOLLOW },
+               { O_NOATIME,    P9_DOTL_NOATIME },
+               { O_CLOEXEC,    P9_DOTL_CLOEXEC },
+               { O_SYNC,       P9_DOTL_SYNC},
+       };
+       for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
+               if (flags & dotl_oflag_map[i].open_flag)
+                       rflags |= dotl_oflag_map[i].dotl_flag;
+       }
+       return rflags;
+}
+
+/**
+ * v9fs_open_to_dotl_flags- convert Linux specific open flags to
+ * plan 9 open flag.
+ * @flags: flags to convert
+ */
+int v9fs_open_to_dotl_flags(int flags)
+{
+       int rflags = 0;
+
+       /*
+        * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
+        * and P9_DOTL_NOACCESS
+        */
+       rflags |= flags & O_ACCMODE;
+       rflags |= v9fs_mapped_dotl_flags(flags);
+
+       return rflags;
+}
+
 /**
  * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
  * @dir: directory inode that is being created
@@ -258,7 +311,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
                           "Failed to get acl values in creat %d\n", err);
                goto error;
        }
-       err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
+       err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
+                                   mode, gid, &qid);
        if (err < 0) {
                P9_DPRINTK(P9_DEBUG_VFS,
                                "p9_client_open_dotl failed in creat %d\n",
@@ -281,10 +335,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
                P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
                goto error;
        }
-       d_instantiate(dentry, inode);
        err = v9fs_fid_add(dentry, fid);
        if (err < 0)
                goto error;
+       d_instantiate(dentry, inode);
 
        /* Now set the ACL based on the default value */
        v9fs_set_create_acl(dentry, &dacl, &pacl);
@@ -403,10 +457,10 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                                err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /*
@@ -414,7 +468,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
                 * inode with stat. We need to get an inode
                 * so that we can set the acl with dentry
                 */
-               inode = v9fs_get_inode(dir->i_sb, mode);
+               inode = v9fs_get_inode(dir->i_sb, mode, 0);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -540,6 +594,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
 void
 v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
 {
+       mode_t mode;
        struct v9fs_inode *v9inode = V9FS_I(inode);
 
        if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
@@ -552,11 +607,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
                inode->i_uid = stat->st_uid;
                inode->i_gid = stat->st_gid;
                inode->i_nlink = stat->st_nlink;
-               inode->i_mode = stat->st_mode;
-               inode->i_rdev = new_decode_dev(stat->st_rdev);
 
-               if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
-                       init_special_inode(inode, inode->i_mode, inode->i_rdev);
+               mode = stat->st_mode & S_IALLUGO;
+               mode |= inode->i_mode & ~S_IALLUGO;
+               inode->i_mode = mode;
 
                i_size_write(inode, stat->st_size);
                inode->i_blocks = stat->st_blocks;
@@ -657,14 +711,14 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
                                        err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /* Not in cached mode. No need to populate inode with stat */
-               inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
+               inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -810,17 +864,17 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
                                err);
                        goto error;
                }
-               d_instantiate(dentry, inode);
                err = v9fs_fid_add(dentry, fid);
                if (err < 0)
                        goto error;
+               d_instantiate(dentry, inode);
                fid = NULL;
        } else {
                /*
                 * Not in cached mode. No need to populate inode with stat.
                 * socket syscall returns a fd, so we need instantiate
                 */
-               inode = v9fs_get_inode(dir->i_sb, mode);
+               inode = v9fs_get_inode(dir->i_sb, mode, rdev);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        goto error;
@@ -886,6 +940,11 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
        st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
        if (IS_ERR(st))
                return PTR_ERR(st);
+       /*
+        * Don't update inode if the file type is different
+        */
+       if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
+               goto out;
 
        spin_lock(&inode->i_lock);
        /*
@@ -897,6 +956,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
        if (v9ses->cache)
                inode->i_size = i_size;
        spin_unlock(&inode->i_lock);
+out:
        kfree(st);
        return 0;
 }
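Side note: the dotl_openflag_map table above makes the same point for open(2) flags, since the numeric O_* values are architecture-specific they are translated bit by bit into the P9_DOTL_* values (defined in include/net/9p/9p.h, shown further down) before going on the wire, while the O_ACCMODE bits are copied through. A trimmed-down userspace version of the same table-driven translation, using a few of the protocol values from this series:

#include <fcntl.h>
#include <stdio.h>

#define P9_DOTL_CREATE   00000100
#define P9_DOTL_TRUNC    00001000
#define P9_DOTL_APPEND   00002000
#define P9_DOTL_NONBLOCK 00004000

struct openflag_map {
	int open_flag;   /* local O_* value */
	int dotl_flag;   /* value sent to the 9p server */
};

static const struct openflag_map map[] = {
	{ O_CREAT,    P9_DOTL_CREATE   },
	{ O_TRUNC,    P9_DOTL_TRUNC    },
	{ O_APPEND,   P9_DOTL_APPEND   },
	{ O_NONBLOCK, P9_DOTL_NONBLOCK },
};

/* Access-mode bits share the protocol encoding and are copied through;
 * every other flag goes through the table. */
static int open_to_dotl_flags(int flags)
{
	int rflags = flags & O_ACCMODE;
	unsigned int i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (flags & map[i].open_flag)
			rflags |= map[i].dotl_flag;
	return rflags;
}

int main(void)
{
	printf("0%o\n", open_to_dotl_flags(O_WRONLY | O_CREAT | O_TRUNC));
	return 0;
}
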
index feef6cdc1fd22d23ba45a8f98f2f21ec0a0dc95a..c70251d47ed196c65594365091c1125f4e86d987 100644 (file)
@@ -149,7 +149,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
        else
                sb->s_d_op = &v9fs_dentry_operations;
 
-       inode = v9fs_get_inode(sb, S_IFDIR | mode);
+       inode = v9fs_get_inode(sb, S_IFDIR | mode, 0);
        if (IS_ERR(inode)) {
                retval = PTR_ERR(inode);
                goto release_sb;
index ff77262e887cb66819f31d781e679eccb2d1975a..95f786ec7f088f1c85b7e82dfd01fe9bb7246fe3 100644 (file)
@@ -1429,6 +1429,11 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                WARN_ON_ONCE(bdev->bd_holders);
                sync_blockdev(bdev);
                kill_bdev(bdev);
+               /* ->release can cause the old bdi to disappear,
+                * so must switch it out first
+                */
+               bdev_inode_switch_bdi(bdev->bd_inode,
+                                       &default_backing_dev_info);
        }
        if (bdev->bd_contains == bdev) {
                if (disk->fops->release)
@@ -1442,8 +1447,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
                disk_put_part(bdev->bd_part);
                bdev->bd_part = NULL;
                bdev->bd_disk = NULL;
-               bdev_inode_switch_bdi(bdev->bd_inode,
-                                       &default_backing_dev_info);
                if (bdev != bdev->bd_contains)
                        victim = bdev->bd_contains;
                bdev->bd_contains = NULL;
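Side note: the move above is an ordering fix; as the added comment says, ->release() can make the old backing_dev_info disappear, so the inode has to be switched to the default bdi first. The generic pattern, redirecting a shared pointer to a safe default before invoking a callback that may free the old target, looks like this in miniature (illustration only):

#include <stdio.h>
#include <stdlib.h>

struct bdi { const char *name; };

static struct bdi default_bdi = { "default" };

struct holder {
	struct bdi *bdi;          /* shared pointer that others may dereference */
};

/* Callback that, like ->release() here, may destroy the object the
 * holder used to point at. */
static void release(struct bdi *b)
{
	if (b != &default_bdi)
		free(b);
}

int main(void)
{
	struct bdi *private = malloc(sizeof(*private));
	struct holder h;

	if (!private)
		return 1;
	private->name = "private";
	h.bdi = private;

	/* Switch the holder away first ... */
	h.bdi = &default_bdi;
	/* ... so anyone using h.bdi after release() never sees freed memory. */
	release(private);

	printf("holder now uses: %s\n", h.bdi->name);
	return 0;
}
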
index fee028b5332e0d802b26fdc94dab21057b41028a..86c59e16ba74e459ad0c3f29743ace6663f29206 100644 (file)
@@ -1595,7 +1595,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
                r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
                dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
                     *ppath);
-       } else if (rpath) {
+       } else if (rpath || rino) {
                *ino = rino;
                *ppath = rpath;
                *pathlen = strlen(rpath);
index d47c5ec7fb1ff0500ebbeb7f403debfa865c87a0..88bacaf385d960fcd1ba98e99049e7a47658abe7 100644 (file)
@@ -813,8 +813,8 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        fsc = create_fs_client(fsopt, opt);
        if (IS_ERR(fsc)) {
                res = ERR_CAST(fsc);
-               kfree(fsopt);
-               kfree(opt);
+               destroy_mount_options(fsopt);
+               ceph_destroy_options(opt);
                goto out_final;
        }
 
index 2826db35dc257bb15241376e3b6611582b2e4c92..b52bc685465f5ecf1257d7982887b1435a7a6c2d 100644 (file)
@@ -727,25 +727,22 @@ static int follow_automount(struct path *path, unsigned flags,
        if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
                return -EISDIR; /* we actually want to stop here */
 
-       /*
-        * We don't want to mount if someone's just doing a stat and they've
-        * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and
-        * appended a '/' to the name.
+       /* We don't want to mount if someone's just doing a stat -
+        * unless they're stat'ing a directory and appended a '/' to
+        * the name.
+        *
+        * We do, however, want to mount if someone wants to open or
+        * create a file of any type under the mountpoint, wants to
+        * traverse through the mountpoint or wants to open the
+        * mounted directory.  Also, autofs may mark negative dentries
+        * as being automount points.  These will need the attentions
+        * of the daemon to instantiate them before they can be used.
         */
-       if (!(flags & LOOKUP_FOLLOW)) {
-               /* We do, however, want to mount if someone wants to open or
-                * create a file of any type under the mountpoint, wants to
-                * traverse through the mountpoint or wants to open the mounted
-                * directory.
-                * Also, autofs may mark negative dentries as being automount
-                * points.  These will need the attentions of the daemon to
-                * instantiate them before they can be used.
-                */
-               if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-                            LOOKUP_OPEN | LOOKUP_CREATE)) &&
-                   path->dentry->d_inode)
-                       return -EISDIR;
-       }
+       if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
+                    LOOKUP_OPEN | LOOKUP_CREATE)) &&
+           path->dentry->d_inode)
+               return -EISDIR;
+
        current->total_link_count++;
        if (current->total_link_count >= 40)
                return -ELOOP;
index 45174b53437730b350d97658318721579448d20f..feb361e252acfe801d186849430c553e39a4268e 100644 (file)
@@ -335,9 +335,9 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
 #define DBGKEY(key)  ((char *)(key))
 #define DBGKEY1(key) ((char *)(key))
 
-#define ubifs_dbg_msg(fmt, ...) do {               \
-       if (0)                                     \
-               pr_debug(fmt "\n", ##__VA_ARGS__); \
+#define ubifs_dbg_msg(fmt, ...) do {                        \
+       if (0)                                              \
+               printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
 } while (0)
 
 #define dbg_dump_stack()
index 245bafdafd5ec78e1461b7cbb5817714a28952ed..c816075c01ceb3772241e17729dabd244be62764 100644 (file)
@@ -944,8 +944,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);
 
 extern int perf_num_counters(void);
 extern const char *perf_pmu_name(void);
-extern void __perf_event_task_sched_in(struct task_struct *task);
-extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
+extern void __perf_event_task_sched_in(struct task_struct *prev,
+                                      struct task_struct *task);
+extern void __perf_event_task_sched_out(struct task_struct *prev,
+                                       struct task_struct *next);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -1059,17 +1061,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 
 extern struct jump_label_key perf_sched_events;
 
-static inline void perf_event_task_sched_in(struct task_struct *task)
+static inline void perf_event_task_sched_in(struct task_struct *prev,
+                                           struct task_struct *task)
 {
        if (static_branch(&perf_sched_events))
-               __perf_event_task_sched_in(task);
+               __perf_event_task_sched_in(prev, task);
 }
 
-static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
+static inline void perf_event_task_sched_out(struct task_struct *prev,
+                                            struct task_struct *next)
 {
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-       __perf_event_task_sched_out(task, next);
+       if (static_branch(&perf_sched_events))
+               __perf_event_task_sched_out(prev, next);
 }
 
 extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1139,10 +1144,11 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task)                     { }
+perf_event_task_sched_in(struct task_struct *prev,
+                        struct task_struct *task)                      { }
 static inline void
-perf_event_task_sched_out(struct task_struct *task,
-                           struct task_struct *next)                   { }
+perf_event_task_sched_out(struct task_struct *prev,
+                         struct task_struct *next)                     { }
 static inline int perf_event_init_task(struct task_struct *child)      { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)     { }
 static inline void perf_event_free_task(struct task_struct *task)      { }
index 26f6ea4444e39fedda935b5505439a6de3301f41..b47771aa57180b6d553fbdb9363973f28569fde6 100644 (file)
@@ -123,7 +123,7 @@ struct regulator_bulk_data {
        const char *supply;
        struct regulator *consumer;
 
-       /* Internal use */
+       /* private: Internal use */
        int ret;
 };
 
index 342dcf13d039a8470ddcf4628b55c510c7fb13fe..a6326ef8ade6ccfbf19b615167f9863bc941d663 100644 (file)
@@ -288,6 +288,35 @@ enum p9_perm_t {
        P9_DMSETVTX = 0x00010000,
 };
 
+/* 9p2000.L open flags */
+#define P9_DOTL_RDONLY        00000000
+#define P9_DOTL_WRONLY        00000001
+#define P9_DOTL_RDWR          00000002
+#define P9_DOTL_NOACCESS      00000003
+#define P9_DOTL_CREATE        00000100
+#define P9_DOTL_EXCL          00000200
+#define P9_DOTL_NOCTTY        00000400
+#define P9_DOTL_TRUNC         00001000
+#define P9_DOTL_APPEND        00002000
+#define P9_DOTL_NONBLOCK      00004000
+#define P9_DOTL_DSYNC         00010000
+#define P9_DOTL_FASYNC        00020000
+#define P9_DOTL_DIRECT        00040000
+#define P9_DOTL_LARGEFILE     00100000
+#define P9_DOTL_DIRECTORY     00200000
+#define P9_DOTL_NOFOLLOW      00400000
+#define P9_DOTL_NOATIME       01000000
+#define P9_DOTL_CLOEXEC       02000000
+#define P9_DOTL_SYNC          04000000
+
+/* 9p2000.L at flags */
+#define P9_DOTL_AT_REMOVEDIR           0x200
+
+/* 9p2000.L lock type */
+#define P9_LOCK_TYPE_RDLCK 0
+#define P9_LOCK_TYPE_WRLCK 1
+#define P9_LOCK_TYPE_UNLCK 2
+
 /**
  * enum p9_qid_t - QID types
  * @P9_QTDIR: directory
index 408ae4882d2262d4d8b084701cdb530cb39cbfe3..401d73bd151f3a471d7be694531b0ae49837a00b 100644 (file)
@@ -1744,6 +1744,8 @@ struct wiphy_wowlan_support {
  *     by default for perm_addr. In this case, the mask should be set to
  *     all-zeroes. In this case it is assumed that the device can handle
  *     the same number of arbitrary MAC addresses.
+ * @registered: protects ->resume and ->suspend sysfs callbacks against
+ *     unregister hardware
  * @debugfsdir: debugfs directory used for this wiphy, will be renamed
  *     automatically on wiphy renames
  * @dev: (virtual) struct device for this wiphy
index b8785e26ee1cd28c33a1c0429a49bb515c34c8d2..0f857782d06f45eace0bc2bedb7bb1ccf3bc3512 100644 (file)
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
        local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                        struct task_struct *next)
 {
-       perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
+       struct perf_cgroup *cgrp1;
+       struct perf_cgroup *cgrp2 = NULL;
+
+       /*
+        * we come here when we know perf_cgroup_events > 0
+        */
+       cgrp1 = perf_cgroup_from_task(task);
+
+       /*
+        * next is NULL when called from perf_event_enable_on_exec()
+        * that will systematically cause a cgroup_switch()
+        */
+       if (next)
+               cgrp2 = perf_cgroup_from_task(next);
+
+       /*
+        * only schedule out current cgroup events if we know
+        * that we are switching to a different cgroup. Otherwise,
+        * do no touch the cgroup events.
+        */
+       if (cgrp1 != cgrp2)
+               perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                       struct task_struct *task)
 {
-       perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+       struct perf_cgroup *cgrp1;
+       struct perf_cgroup *cgrp2 = NULL;
+
+       /*
+        * we come here when we know perf_cgroup_events > 0
+        */
+       cgrp1 = perf_cgroup_from_task(task);
+
+       /* prev can never be NULL */
+       cgrp2 = perf_cgroup_from_task(prev);
+
+       /*
+        * only need to schedule in cgroup events if we are changing
+        * cgroup during ctxsw. Cgroup events were not scheduled
+        * out of ctxsw out if that was not the case.
+        */
+       if (cgrp1 != cgrp2)
+               perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 }
 
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task)
+static inline void perf_cgroup_sched_out(struct task_struct *task,
+                                        struct task_struct *next)
 {
 }
 
-static inline void perf_cgroup_sched_in(struct task_struct *task)
+static inline void perf_cgroup_sched_in(struct task_struct *prev,
+                                       struct task_struct *task)
 {
 }
 
@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
         * cgroup event are system-wide mode only
         */
        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-               perf_cgroup_sched_out(task);
+               perf_cgroup_sched_out(task, next);
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void __perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *prev,
+                               struct task_struct *task)
 {
        struct perf_event_context *ctx;
        int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
         * cgroup event are system-wide mode only
         */
        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
-               perf_cgroup_sched_in(task);
+               perf_cgroup_sched_in(prev, task);
 }
 
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
         * ctxswin cgroup events which are already scheduled
         * in.
         */
-       perf_cgroup_sched_out(current);
+       perf_cgroup_sched_out(current, NULL);
 
        raw_spin_lock(&ctx->lock);
        task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
 }
 
 static void calc_timer_values(struct perf_event *event,
-                               u64 *running,
-                               u64 *enabled)
+                               u64 *enabled,
+                               u64 *running)
 {
        u64 now, ctx_time;
 
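Side note: the extra prev/next arguments exist so the cgroup switch can be skipped when both tasks belong to the same perf cgroup; perf_cgroup_switch() is only worth doing when the cgroup actually changes. Stripped of the perf machinery, the guard is just a pointer comparison (illustrative sketch only):

#include <stdio.h>

struct cgroup { const char *name; };

struct task { struct cgroup *cgrp; };

static void expensive_switch(const char *dir)
{
	printf("switching cgroup events %s\n", dir);
}

/* Schedule cgroup events out only if the next task runs in a different
 * cgroup; otherwise leave them untouched. */
static void cgroup_sched_out(struct task *task, struct task *next)
{
	struct cgroup *c1 = task->cgrp;
	/* next is NULL when called from the enable-on-exec path. */
	struct cgroup *c2 = next ? next->cgrp : NULL;

	if (c1 != c2)
		expensive_switch("out");
}

int main(void)
{
	struct cgroup a = { "a" }, b = { "b" };
	struct task t1 = { &a }, t2 = { &a }, t3 = { &b };

	cgroup_sched_out(&t1, &t2);   /* same cgroup: nothing happens */
	cgroup_sched_out(&t1, &t3);   /* different cgroup: switch */
	return 0;
}
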
index ccacdbdecf452bda8769878ca6e558d13ebb4e74..ec5f472bc5b9cec2a5c43ab7f8a054ecca75b39a 100644 (file)
@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_disable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
-       perf_event_task_sched_in(current);
+       perf_event_task_sched_in(prev, current);
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
 }
 
 /*
- * schedule() is the main scheduler function.
+ * __schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+static void __sched __schedule(void)
 {
        struct task_struct *prev, *next;
        unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ need_resched:
                                if (to_wakeup)
                                        try_to_wake_up_local(to_wakeup);
                        }
-
-                       /*
-                        * If we are going to sleep and we have plugged IO
-                        * queued, make sure to submit it to avoid deadlocks.
-                        */
-                       if (blk_needs_flush_plug(prev)) {
-                               raw_spin_unlock(&rq->lock);
-                               blk_schedule_flush_plug(prev);
-                               raw_spin_lock(&rq->lock);
-                       }
                }
                switch_count = &prev->nvcsw;
        }
@@ -4369,6 +4359,26 @@ need_resched:
        if (need_resched())
                goto need_resched;
 }
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+       if (!tsk->state)
+               return;
+       /*
+        * If we are going to sleep and we have plugged IO queued,
+        * make sure to submit it to avoid deadlocks.
+        */
+       if (blk_needs_flush_plug(tsk))
+               blk_schedule_flush_plug(tsk);
+}
+
+asmlinkage void schedule(void)
+{
+       struct task_struct *tsk = current;
+
+       sched_submit_work(tsk);
+       __schedule();
+}
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)
 
        do {
                add_preempt_count_notrace(PREEMPT_ACTIVE);
-               schedule();
+               __schedule();
                sub_preempt_count_notrace(PREEMPT_ACTIVE);
 
                /*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
        do {
                add_preempt_count(PREEMPT_ACTIVE);
                local_irq_enable();
-               schedule();
+               __schedule();
                local_irq_disable();
                sub_preempt_count(PREEMPT_ACTIVE);
 
@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
 static void __cond_resched(void)
 {
        add_preempt_count(PREEMPT_ACTIVE);
-       schedule();
+       __schedule();
        sub_preempt_count(PREEMPT_ACTIVE);
 }
 
@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
                        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
                        if (sd && (sd->flags & SD_OVERLAP))
                                free_sched_groups(sd->groups, 0);
+                       kfree(*per_cpu_ptr(sdd->sd, j));
                        kfree(*per_cpu_ptr(sdd->sg, j));
                        kfree(*per_cpu_ptr(sdd->sgp, j));
                }
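
Taken together, the kernel/sched.c hunks split the scheduler entry point: the exported schedule() flushes any plugged block I/O via sched_submit_work() before entering the core __schedule(), while the preemption paths (preempt_schedule(), preempt_schedule_irq(), __cond_resched()) call __schedule() directly and skip the flush. A minimal userspace sketch of that wrapper/core split, with purely illustrative names standing in for the kernel functions:

    #include <stdbool.h>
    #include <stdio.h>

    /* Core path: only the context switch itself (stands in for __schedule()). */
    static void core_schedule(void)
    {
            puts("switching tasks");
    }

    /* Stand-ins for blk_needs_flush_plug()/blk_schedule_flush_plug(). */
    static bool has_plugged_io(void)   { return true; }
    static void flush_plugged_io(void) { puts("flushing plugged I/O"); }

    /* Public entry point: flush deferred work only when about to block. */
    static void schedule_public(bool going_to_sleep)
    {
            if (going_to_sleep && has_plugged_io())
                    flush_plugged_io();
            core_schedule();
    }

    /* Preemption path calls the core directly and skips the flush. */
    static void schedule_preempt(void)
    {
            core_schedule();
    }

    int main(void)
    {
            schedule_public(true);   /* a task blocking voluntarily */
            schedule_preempt();      /* an involuntary preemption   */
            return 0;
    }
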
index 59f369f98a04311f5bfa49d01e706d4b12ca97c4..ea5e1a928d5b08c04321ab1b39a87c486118b1d2 100644 (file)
@@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 static void alarm_timer_get(struct k_itimer *timr,
                                struct itimerspec *cur_setting)
 {
+       memset(cur_setting, 0, sizeof(struct itimerspec));
+
        cur_setting->it_interval =
                        ktime_to_timespec(timr->it.alarmtimer.period);
        cur_setting->it_value =
@@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
        if (!rtcdev)
                return -ENOTSUPP;
 
-       /* Save old values */
-       old_setting->it_interval =
-                       ktime_to_timespec(timr->it.alarmtimer.period);
-       old_setting->it_value =
-                       ktime_to_timespec(timr->it.alarmtimer.node.expires);
+       /*
+        * XXX HACK! Currently we can DOS a system if the interval
+        * period on alarmtimers is too small. Cap the interval here
+        * to 100us and solve this properly in a future patch! -jstultz
+        */
+       if ((new_setting->it_interval.tv_sec == 0) &&
+                       (new_setting->it_interval.tv_nsec < 100000))
+               new_setting->it_interval.tv_nsec = 100000;
+
+       if (old_setting)
+               alarm_timer_get(timr, old_setting);
 
        /* If the timer was already set, cancel it */
        alarm_cancel(&timr->it.alarmtimer);
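
The alarmtimer hunks zero the reported itimerspec before filling it, reuse alarm_timer_get() for the old setting, and cap sub-100us periods so a tiny interval cannot flood the system with alarm expirations. A small standalone sketch of that clamping step, assuming the same 100us floor as the patch (the helper name is hypothetical):

    #include <stdio.h>
    #include <time.h>

    #define MIN_PERIOD_NS 100000L   /* 100us floor, mirroring the interim cap */

    /* Raise a too-small interval to the minimum allowed period. */
    static void clamp_interval(struct timespec *interval)
    {
            if (interval->tv_sec == 0 && interval->tv_nsec < MIN_PERIOD_NS)
                    interval->tv_nsec = MIN_PERIOD_NS;
    }

    int main(void)
    {
            struct timespec it = { .tv_sec = 0, .tv_nsec = 500 };

            clamp_interval(&it);
            printf("period = %ld.%09ld s\n", (long)it.tv_sec, it.tv_nsec);
            return 0;
    }
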
index 175b5135bdcf524d324c73298cd17da0819c9a1e..e317583fcc7314d877b9a899df24eb2a475fe9ee 100644 (file)
@@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
 {
        int in, out, inp, outp;
        struct virtio_chan *chan = client->trans;
-       char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
        unsigned long flags;
        size_t pdata_off = 0;
        struct trans_rpage_info *rpinfo = NULL;
@@ -346,7 +345,8 @@ req_retry_pinned:
                 * Arrange in such a way that server places header in the
                 * alloced memory and payload onto the user buffer.
                 */
-               inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
+               inp = pack_sg_list(chan->sg, out,
+                                  VIRTQUEUE_NUM, req->rc->sdata, 11);
                /*
                 * Running executables in the filesystem may result in
                 * a read request with kernel buffer as opposed to user buffer.
@@ -366,8 +366,8 @@ req_retry_pinned:
                }
                in += inp;
        } else {
-               in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
-                               req->rc->capacity);
+               in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
+                                 req->rc->sdata, req->rc->capacity);
        }
 
        err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
        .close = p9_virtio_close,
        .request = p9_virtio_request,
        .cancel = p9_virtio_cancel,
-       .maxsize = PAGE_SIZE*VIRTQUEUE_NUM,
+
+       /*
+        * We leave one entry for input and one entry for response
+        * headers. We also skip one more entry to accommodate addresses
+        * that are not at a page boundary, which can result in an extra
+        * page in zero copy.
+        */
+       .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
        .pref = P9_TRANS_PREF_PAYLOAD_SEP,
        .def = 0,
        .owner = THIS_MODULE,
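
The trans_virtio hunks drop the hand-computed rdata pointer in favour of req->rc->sdata and shrink .maxsize by three pages, reserving ring entries for the request header, the reply header, and the extra page a zero-copy payload can span when it does not start on a page boundary. A standalone sketch of that last point, plain arithmetic rather than the 9p code:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Pages touched by a buffer of 'len' bytes starting at page offset 'off'. */
    static unsigned long pages_spanned(unsigned long off, unsigned long len)
    {
            return (off + len + PAGE_SIZE - 1) / PAGE_SIZE;
    }

    int main(void)
    {
            /* Page-aligned: 8192 bytes fit in exactly two pages. */
            printf("aligned:   %lu pages\n", pages_spanned(0, 2 * PAGE_SIZE));
            /* Offset by 100 bytes: the same payload now touches three pages. */
            printf("unaligned: %lu pages\n", pages_spanned(100, 2 * PAGE_SIZE));
            return 0;
    }
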
index d5f2d97ac05caf40124fe4f628efe77a514d8e55..1f4cb30a42c558e89cfa696af9efcacc4209c0d6 100644 (file)
@@ -7,27 +7,37 @@
 
 #include <linux/ceph/msgpool.h>
 
-static void *alloc_fn(gfp_t gfp_mask, void *arg)
+static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
 {
        struct ceph_msgpool *pool = arg;
-       void *p;
+       struct ceph_msg *msg;
 
-       p = ceph_msg_new(0, pool->front_len, gfp_mask);
-       if (!p)
-               pr_err("msgpool %s alloc failed\n", pool->name);
-       return p;
+       msg = ceph_msg_new(0, pool->front_len, gfp_mask);
+       if (!msg) {
+               dout("msgpool_alloc %s failed\n", pool->name);
+       } else {
+               dout("msgpool_alloc %s %p\n", pool->name, msg);
+               msg->pool = pool;
+       }
+       return msg;
 }
 
-static void free_fn(void *element, void *arg)
+static void msgpool_free(void *element, void *arg)
 {
-       ceph_msg_put(element);
+       struct ceph_msgpool *pool = arg;
+       struct ceph_msg *msg = element;
+
+       dout("msgpool_release %s %p\n", pool->name, msg);
+       msg->pool = NULL;
+       ceph_msg_put(msg);
 }
 
 int ceph_msgpool_init(struct ceph_msgpool *pool,
                      int front_len, int size, bool blocking, const char *name)
 {
+       dout("msgpool %s init\n", name);
        pool->front_len = front_len;
-       pool->pool = mempool_create(size, alloc_fn, free_fn, pool);
+       pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
        if (!pool->pool)
                return -ENOMEM;
        pool->name = name;
@@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool,
 
 void ceph_msgpool_destroy(struct ceph_msgpool *pool)
 {
+       dout("msgpool %s destroy\n", pool->name);
        mempool_destroy(pool->pool);
 }
 
 struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
                                  int front_len)
 {
+       struct ceph_msg *msg;
+
        if (front_len > pool->front_len) {
-               pr_err("msgpool_get pool %s need front %d, pool size is %d\n",
+               dout("msgpool_get %s need front %d, pool size is %d\n",
                       pool->name, front_len, pool->front_len);
                WARN_ON(1);
 
@@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
                return ceph_msg_new(0, front_len, GFP_NOFS);
        }
 
-       return mempool_alloc(pool->pool, GFP_NOFS);
+       msg = mempool_alloc(pool->pool, GFP_NOFS);
+       dout("msgpool_get %s %p\n", pool->name, msg);
+       return msg;
 }
 
 void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
 {
+       dout("msgpool_put %s %p\n", pool->name, msg);
+
        /* reset msg front_len; user may have changed it */
        msg->front.iov_len = pool->front_len;
        msg->hdr.front_len = cpu_to_le32(pool->front_len);
 
        kref_init(&msg->kref);  /* retake single ref */
+       mempool_free(msg, pool->pool);
 }
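
The msgpool hunks rename the mempool callbacks, tag each message with its owning pool (msg->pool), demote the allocation-failure pr_err() to dout() debugging, and make ceph_msgpool_put() reset the front length before handing the message back with mempool_free(). A tiny userspace sketch of the same allocate-callback/release-callback pool shape, with illustrative types rather than the Ceph API:

    #include <stdio.h>
    #include <stdlib.h>

    struct pool {
            const char *name;
            size_t front_len;
    };

    struct item {
            struct pool *pool;      /* back-pointer, like msg->pool */
            size_t len;
    };

    /* Allocation callback: create an item and remember its owning pool. */
    static struct item *pool_alloc(struct pool *p)
    {
            struct item *it = malloc(sizeof(*it));

            if (!it) {
                    fprintf(stderr, "%s: alloc failed\n", p->name);
                    return NULL;
            }
            it->pool = p;
            it->len = p->front_len;
            return it;
    }

    /* Release callback: clear the back-pointer and free the item. */
    static void pool_free(struct item *it)
    {
            it->pool = NULL;
            free(it);
    }

    /* "put": reset per-use state before returning the item to the pool. */
    static void pool_put(struct pool *p, struct item *it)
    {
            it->len = p->front_len;   /* user may have changed it */
            pool_free(it);            /* a real pool would recycle instead */
    }

    int main(void)
    {
            struct pool p = { .name = "osd_op", .front_len = 4096 };
            struct item *it = pool_alloc(&p);

            if (it)
                    pool_put(&p, it);
            return 0;
    }
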
index ce310eee708d9f76c0a631b32edb6eb046cff153..16836a7df7a6364c5f6823975d559ff3b30106a2 100644 (file)
@@ -685,6 +685,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
        put_osd(osd);
 }
 
+static void remove_all_osds(struct ceph_osd_client *osdc)
+{
+       dout("remove_all_osds %p\n", osdc);
+       mutex_lock(&osdc->request_mutex);
+       while (!RB_EMPTY_ROOT(&osdc->osds)) {
+               struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+                                               struct ceph_osd, o_node);
+               __remove_osd(osdc, osd);
+       }
+       mutex_unlock(&osdc->request_mutex);
+}
+
 static void __move_osd_to_lru(struct ceph_osd_client *osdc,
                              struct ceph_osd *osd)
 {
@@ -701,14 +713,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd)
                list_del_init(&osd->o_osd_lru);
 }
 
-static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
+static void remove_old_osds(struct ceph_osd_client *osdc)
 {
        struct ceph_osd *osd, *nosd;
 
        dout("__remove_old_osds %p\n", osdc);
        mutex_lock(&osdc->request_mutex);
        list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
-               if (!remove_all && time_before(jiffies, osd->lru_ttl))
+               if (time_before(jiffies, osd->lru_ttl))
                        break;
                __remove_osd(osdc, osd);
        }
@@ -751,6 +763,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
        struct rb_node *parent = NULL;
        struct ceph_osd *osd = NULL;
 
+       dout("__insert_osd %p osd%d\n", new, new->o_osd);
        while (*p) {
                parent = *p;
                osd = rb_entry(parent, struct ceph_osd, o_node);
@@ -1144,7 +1157,7 @@ static void handle_osds_timeout(struct work_struct *work)
 
        dout("osds timeout\n");
        down_read(&osdc->map_sem);
-       remove_old_osds(osdc, 0);
+       remove_old_osds(osdc);
        up_read(&osdc->map_sem);
 
        schedule_delayed_work(&osdc->osds_timeout_work,
@@ -1862,8 +1875,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
                ceph_osdmap_destroy(osdc->osdmap);
                osdc->osdmap = NULL;
        }
-       remove_old_osds(osdc, 1);
-       WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
+       remove_all_osds(osdc);
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
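
The osd_client hunks drop the remove_all flag from remove_old_osds() and add a dedicated remove_all_osds() that drains the OSD rbtree itself at shutdown, so OSDs that never reached the LRU list are also torn down. A small sketch of the two loop shapes, expired-only scan versus drain-until-empty, using a plain linked list in place of the kernel's rbtree:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            struct node *next;
            long expires;
    };

    /* Remove only leading entries whose ttl has passed, like remove_old_osds(). */
    static void remove_expired(struct node **head, long now)
    {
            while (*head && (*head)->expires <= now) {
                    struct node *dead = *head;

                    *head = dead->next;
                    free(dead);
            }
    }

    /* Tear everything down unconditionally, like remove_all_osds(). */
    static void remove_all(struct node **head)
    {
            while (*head) {
                    struct node *dead = *head;

                    *head = dead->next;
                    free(dead);
            }
    }

    int main(void)
    {
            struct node *head = NULL;
            long i;

            for (i = 3; i >= 1; i--) {      /* builds a list sorted by expiry */
                    struct node *n = malloc(sizeof(*n));

                    if (!n)
                            return 1;
                    n->expires = i;
                    n->next = head;
                    head = n;
            }
            remove_expired(&head, 2);
            remove_all(&head);
            printf("list drained: %s\n", head ? "no" : "yes");
            return 0;
    }
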