git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'akpm-current/current'
author Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 2 Nov 2015 03:45:18 +0000 (14:45 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 2 Nov 2015 03:45:26 +0000 (14:45 +1100)
99 files changed:
Documentation/filesystems/proc.txt
Documentation/kernel-parameters.txt
MAINTAINERS
arch/arc/mm/cache.c
arch/arm/mm/dma-mapping.c
arch/arm/xen/mm.c
arch/arm64/include/asm/pgtable.h
arch/arm64/mm/dma-mapping.c
arch/mips/mm/tlbex.c
arch/powerpc/include/asm/pgtable-ppc64.h
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/numa.c
arch/powerpc/sysdev/fsl_pci.c
arch/s390/include/asm/pgtable.h
arch/x86/Kconfig
arch/x86/entry/syscalls/syscall_32.tbl
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/smpboot.c
arch/x86/mm/gup.c
block/blk-core.c
block/blk-mq-tag.c
block/blk-mq.c
block/genhd.c
drivers/block/drbd/drbd_bitmap.c
drivers/block/nbd.c
drivers/block/pktcdvd.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/i915/i915_gem.c
drivers/infiniband/core/sa_query.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/md/dm-crypt.c
drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c
drivers/media/pci/solo6x10/solo6x10-v4l2.c
drivers/media/pci/tw68/tw68-video.c
drivers/misc/vmw_balloon.c
drivers/mtd/mtdcore.c
drivers/nvme/host/pci.c
drivers/staging/android/ion/ion_system_heap.c
drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
drivers/staging/rdma/hfi1/init.c
drivers/staging/rdma/ipath/ipath_file_ops.c
drivers/usb/gadget/function/f_mass_storage.c
drivers/usb/host/u132-hcd.c
fs/9p/vfs_file.c
fs/cifs/file.c
fs/coredump.c
fs/direct-io.c
fs/ext4/inode.c
fs/ext4/readpage.c
fs/ext4/super.c
fs/fs-writeback.c
fs/jffs2/wbuf.c
fs/mpage.c
fs/namei.c
fs/nfs/file.c
fs/ocfs2/cluster/heartbeat.c
fs/proc/array.c
fs/proc/task_mmu.c
fs/xfs/xfs_qm.c
include/asm-generic/pgtable.h
include/drm/drmP.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/fs.h
include/linux/hugetlb_cgroup.h
include/linux/memcontrol.h
include/linux/sched.h
include/linux/skbuff.h
include/net/sock.h
kernel/audit.c
kernel/cgroup.c
kernel/cpuset.c
kernel/fork.c
kernel/futex.c
kernel/kexec_core.c
kernel/params.c
kernel/sysctl.c
lib/Kconfig.debug
lib/Makefile
lib/dma-debug.c
lib/kobject.c
mm/backing-dev.c
mm/failslab.c
mm/huge_memory.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/page_alloc.c
mm/pgtable-generic.c
mm/vmscan.c
net/core/sock.c
net/netlink/af_netlink.c
net/openvswitch/flow.c
net/rds/ib_recv.c
net/rxrpc/ar-connection.c
tools/testing/selftests/Makefile

Simple merge
Simple merge
diff --cc MAINTAINERS
Simple merge
index ff7ff6cbb8112408c05a38a2f8e001265d5d3726,875ac2e918c55d7fcc17b738c8dcfdeafacb8d79..b65f797e9ad6723abd7c38bba09e382df52450b4
@@@ -617,10 -582,10 +617,10 @@@ void flush_dcache_page(struct page *pag
         */
        if (!mapping_mapped(mapping)) {
                clear_bit(PG_dc_clean, &page->flags);
-       } else if (page_mapped(page)) {
+       } else if (page_mapcount(page)) {
  
                /* kernel reading from page with U-mapping */
 -              unsigned long paddr = (unsigned long)page_address(page);
 +              phys_addr_t paddr = (unsigned long)page_address(page);
                unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
  
                if (addr_not_cache_congruent(paddr, vaddr))
@@@ -853,12 -818,9 +853,12 @@@ void copy_user_highpage(struct page *to
         *
         * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
         * equally valid for SRC page as well
 +       *
 +       * For !VIPT cache, all of this gets compiled out as
 +       * addr_not_cache_congruent() is 0
         */
-       if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+       if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
 -              __flush_dcache_page(kfrom, u_vaddr);
 +              __flush_dcache_page((unsigned long)kfrom, u_vaddr);
                clean_src_k_mappings = 1;
        }
  
Simple merge
Simple merge
Simple merge
index 6320361d8d4c703cf4072ba2e47fdf7884c7f779,478234383c2cd8c90087eca0031b425a3d01b606..131a199114b405e8403f05137e560a2b317f4941
@@@ -533,460 -533,3 +533,460 @@@ static int __init dma_debug_do_init(voi
        return 0;
  }
  fs_initcall(dma_debug_do_init);
-       if (gfp & __GFP_WAIT) {
 +
 +
 +#ifdef CONFIG_IOMMU_DMA
 +#include <linux/dma-iommu.h>
 +#include <linux/platform_device.h>
 +#include <linux/amba/bus.h>
 +
 +/* Thankfully, all cache ops are by VA so we can ignore phys here */
 +static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
 +{
 +      __dma_flush_range(virt, virt + PAGE_SIZE);
 +}
 +
 +static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 +                               dma_addr_t *handle, gfp_t gfp,
 +                               struct dma_attrs *attrs)
 +{
 +      bool coherent = is_device_dma_coherent(dev);
 +      int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
 +      void *addr;
 +
 +      if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
 +              return NULL;
 +      /*
 +       * Some drivers rely on this, and we probably don't want the
 +       * possibility of stale kernel data being read by devices anyway.
 +       */
 +      gfp |= __GFP_ZERO;
 +
++      if (gfpflags_allow_blocking(gfp)) {
 +              struct page **pages;
 +              pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 +
 +              pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
 +                                      flush_page);
 +              if (!pages)
 +                      return NULL;
 +
 +              addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
 +                                            __builtin_return_address(0));
 +              if (!addr)
 +                      iommu_dma_free(dev, pages, size, handle);
 +      } else {
 +              struct page *page;
 +              /*
 +               * In atomic context we can't remap anything, so we'll only
 +               * get the virtually contiguous buffer we need by way of a
 +               * physically contiguous allocation.
 +               */
 +              if (coherent) {
 +                      page = alloc_pages(gfp, get_order(size));
 +                      addr = page ? page_address(page) : NULL;
 +              } else {
 +                      addr = __alloc_from_pool(size, &page, gfp);
 +              }
 +              if (!addr)
 +                      return NULL;
 +
 +              *handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
 +              if (iommu_dma_mapping_error(dev, *handle)) {
 +                      if (coherent)
 +                              __free_pages(page, get_order(size));
 +                      else
 +                              __free_from_pool(addr, size);
 +                      addr = NULL;
 +              }
 +      }
 +      return addr;
 +}
 +
 +static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 +                             dma_addr_t handle, struct dma_attrs *attrs)
 +{
 +      /*
 +       * @cpu_addr will be one of 3 things depending on how it was allocated:
 +       * - A remapped array of pages from iommu_dma_alloc(), for all
 +       *   non-atomic allocations.
 +       * - A non-cacheable alias from the atomic pool, for atomic
 +       *   allocations by non-coherent devices.
 +       * - A normal lowmem address, for atomic allocations by
 +       *   coherent devices.
 +       * Hence how dodgy the below logic looks...
 +       */
 +      if (__in_atomic_pool(cpu_addr, size)) {
 +              iommu_dma_unmap_page(dev, handle, size, 0, NULL);
 +              __free_from_pool(cpu_addr, size);
 +      } else if (is_vmalloc_addr(cpu_addr)){
 +              struct vm_struct *area = find_vm_area(cpu_addr);
 +
 +              if (WARN_ON(!area || !area->pages))
 +                      return;
 +              iommu_dma_free(dev, area->pages, size, &handle);
 +              dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 +      } else {
 +              iommu_dma_unmap_page(dev, handle, size, 0, NULL);
 +              __free_pages(virt_to_page(cpu_addr), get_order(size));
 +      }
 +}
 +
 +static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 +                            void *cpu_addr, dma_addr_t dma_addr, size_t size,
 +                            struct dma_attrs *attrs)
 +{
 +      struct vm_struct *area;
 +      int ret;
 +
 +      vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 +                                           is_device_dma_coherent(dev));
 +
 +      if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 +              return ret;
 +
 +      area = find_vm_area(cpu_addr);
 +      if (WARN_ON(!area || !area->pages))
 +              return -ENXIO;
 +
 +      return iommu_dma_mmap(area->pages, size, vma);
 +}
 +
 +static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 +                             void *cpu_addr, dma_addr_t dma_addr,
 +                             size_t size, struct dma_attrs *attrs)
 +{
 +      unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 +      struct vm_struct *area = find_vm_area(cpu_addr);
 +
 +      if (WARN_ON(!area || !area->pages))
 +              return -ENXIO;
 +
 +      return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
 +                                       GFP_KERNEL);
 +}
 +
 +static void __iommu_sync_single_for_cpu(struct device *dev,
 +                                      dma_addr_t dev_addr, size_t size,
 +                                      enum dma_data_direction dir)
 +{
 +      phys_addr_t phys;
 +
 +      if (is_device_dma_coherent(dev))
 +              return;
 +
 +      phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
 +      __dma_unmap_area(phys_to_virt(phys), size, dir);
 +}
 +
 +static void __iommu_sync_single_for_device(struct device *dev,
 +                                         dma_addr_t dev_addr, size_t size,
 +                                         enum dma_data_direction dir)
 +{
 +      phys_addr_t phys;
 +
 +      if (is_device_dma_coherent(dev))
 +              return;
 +
 +      phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
 +      __dma_map_area(phys_to_virt(phys), size, dir);
 +}
 +
 +static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
 +                                 unsigned long offset, size_t size,
 +                                 enum dma_data_direction dir,
 +                                 struct dma_attrs *attrs)
 +{
 +      bool coherent = is_device_dma_coherent(dev);
 +      int prot = dma_direction_to_prot(dir, coherent);
 +      dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
 +
 +      if (!iommu_dma_mapping_error(dev, dev_addr) &&
 +          !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 +              __iommu_sync_single_for_device(dev, dev_addr, size, dir);
 +
 +      return dev_addr;
 +}
 +
 +static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
 +                             size_t size, enum dma_data_direction dir,
 +                             struct dma_attrs *attrs)
 +{
 +      if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 +              __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
 +
 +      iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
 +}
 +
 +static void __iommu_sync_sg_for_cpu(struct device *dev,
 +                                  struct scatterlist *sgl, int nelems,
 +                                  enum dma_data_direction dir)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      if (is_device_dma_coherent(dev))
 +              return;
 +
 +      for_each_sg(sgl, sg, nelems, i)
 +              __dma_unmap_area(sg_virt(sg), sg->length, dir);
 +}
 +
 +static void __iommu_sync_sg_for_device(struct device *dev,
 +                                     struct scatterlist *sgl, int nelems,
 +                                     enum dma_data_direction dir)
 +{
 +      struct scatterlist *sg;
 +      int i;
 +
 +      if (is_device_dma_coherent(dev))
 +              return;
 +
 +      for_each_sg(sgl, sg, nelems, i)
 +              __dma_map_area(sg_virt(sg), sg->length, dir);
 +}
 +
 +static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 +                              int nelems, enum dma_data_direction dir,
 +                              struct dma_attrs *attrs)
 +{
 +      bool coherent = is_device_dma_coherent(dev);
 +
 +      if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 +              __iommu_sync_sg_for_device(dev, sgl, nelems, dir);
 +
 +      return iommu_dma_map_sg(dev, sgl, nelems,
 +                      dma_direction_to_prot(dir, coherent));
 +}
 +
 +static void __iommu_unmap_sg_attrs(struct device *dev,
 +                                 struct scatterlist *sgl, int nelems,
 +                                 enum dma_data_direction dir,
 +                                 struct dma_attrs *attrs)
 +{
 +      if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 +              __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
 +
 +      iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
 +}
 +
 +static struct dma_map_ops iommu_dma_ops = {
 +      .alloc = __iommu_alloc_attrs,
 +      .free = __iommu_free_attrs,
 +      .mmap = __iommu_mmap_attrs,
 +      .get_sgtable = __iommu_get_sgtable,
 +      .map_page = __iommu_map_page,
 +      .unmap_page = __iommu_unmap_page,
 +      .map_sg = __iommu_map_sg_attrs,
 +      .unmap_sg = __iommu_unmap_sg_attrs,
 +      .sync_single_for_cpu = __iommu_sync_single_for_cpu,
 +      .sync_single_for_device = __iommu_sync_single_for_device,
 +      .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
 +      .sync_sg_for_device = __iommu_sync_sg_for_device,
 +      .dma_supported = iommu_dma_supported,
 +      .mapping_error = iommu_dma_mapping_error,
 +};
 +
 +/*
 + * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 + * everything it needs to - the device is only partially created and the
 + * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 + * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 + * to move the arch_setup_dma_ops() call later, all the notifier bits below
 + * become unnecessary, and will go away.
 + */
 +struct iommu_dma_notifier_data {
 +      struct list_head list;
 +      struct device *dev;
 +      const struct iommu_ops *ops;
 +      u64 dma_base;
 +      u64 size;
 +};
 +static LIST_HEAD(iommu_dma_masters);
 +static DEFINE_MUTEX(iommu_dma_notifier_lock);
 +
 +/*
 + * Temporarily "borrow" a domain feature flag to to tell if we had to resort
 + * to creating our own domain here, in case we need to clean it up again.
 + */
 +#define __IOMMU_DOMAIN_FAKE_DEFAULT           (1U << 31)
 +
 +static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
 +                         u64 dma_base, u64 size)
 +{
 +      struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 +
 +      /*
 +       * Best case: The device is either part of a group which was
 +       * already attached to a domain in a previous call, or it's
 +       * been put in a default DMA domain by the IOMMU core.
 +       */
 +      if (!domain) {
 +              /*
 +               * Urgh. The IOMMU core isn't going to do default domains
 +               * for non-PCI devices anyway, until it has some means of
 +               * abstracting the entirely implementation-specific
 +               * sideband data/SoC topology/unicorn dust that may or
 +               * may not differentiate upstream masters.
 +               * So until then, HORRIBLE HACKS!
 +               */
 +              domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
 +              if (!domain)
 +                      goto out_no_domain;
 +
 +              domain->ops = ops;
 +              domain->type = IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_FAKE_DEFAULT;
 +
 +              if (iommu_attach_device(domain, dev))
 +                      goto out_put_domain;
 +      }
 +
 +      if (iommu_dma_init_domain(domain, dma_base, size))
 +              goto out_detach;
 +
 +      dev->archdata.dma_ops = &iommu_dma_ops;
 +      return true;
 +
 +out_detach:
 +      iommu_detach_device(domain, dev);
 +out_put_domain:
 +      if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
 +              iommu_domain_free(domain);
 +out_no_domain:
 +      pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
 +              dev_name(dev));
 +      return false;
 +}
 +
 +static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
 +                            u64 dma_base, u64 size)
 +{
 +      struct iommu_dma_notifier_data *iommudata;
 +
 +      iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
 +      if (!iommudata)
 +              return;
 +
 +      iommudata->dev = dev;
 +      iommudata->ops = ops;
 +      iommudata->dma_base = dma_base;
 +      iommudata->size = size;
 +
 +      mutex_lock(&iommu_dma_notifier_lock);
 +      list_add(&iommudata->list, &iommu_dma_masters);
 +      mutex_unlock(&iommu_dma_notifier_lock);
 +}
 +
 +static int __iommu_attach_notifier(struct notifier_block *nb,
 +                                 unsigned long action, void *data)
 +{
 +      struct iommu_dma_notifier_data *master, *tmp;
 +
 +      if (action != BUS_NOTIFY_ADD_DEVICE)
 +              return 0;
 +
 +      mutex_lock(&iommu_dma_notifier_lock);
 +      list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
 +              if (do_iommu_attach(master->dev, master->ops,
 +                              master->dma_base, master->size)) {
 +                      list_del(&master->list);
 +                      kfree(master);
 +              }
 +      }
 +      mutex_unlock(&iommu_dma_notifier_lock);
 +      return 0;
 +}
 +
 +static int register_iommu_dma_ops_notifier(struct bus_type *bus)
 +{
 +      struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
 +      int ret;
 +
 +      if (!nb)
 +              return -ENOMEM;
 +      /*
 +       * The device must be attached to a domain before the driver probe
 +       * routine gets a chance to start allocating DMA buffers. However,
 +       * the IOMMU driver also needs a chance to configure the iommu_group
 +       * via its add_device callback first, so we need to make the attach
 +       * happen between those two points. Since the IOMMU core uses a bus
 +       * notifier with default priority for add_device, do the same but
 +       * with a lower priority to ensure the appropriate ordering.
 +       */
 +      nb->notifier_call = __iommu_attach_notifier;
 +      nb->priority = -100;
 +
 +      ret = bus_register_notifier(bus, nb);
 +      if (ret) {
 +              pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
 +                      bus->name);
 +              kfree(nb);
 +      }
 +      return ret;
 +}
 +
 +static int __init __iommu_dma_init(void)
 +{
 +      int ret;
 +
 +      ret = iommu_dma_init();
 +      if (!ret)
 +              ret = register_iommu_dma_ops_notifier(&platform_bus_type);
 +      if (!ret)
 +              ret = register_iommu_dma_ops_notifier(&amba_bustype);
 +      return ret;
 +}
 +arch_initcall(__iommu_dma_init);
 +
 +static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 +                                const struct iommu_ops *ops)
 +{
 +      struct iommu_group *group;
 +
 +      if (!ops)
 +              return;
 +      /*
 +       * TODO: As a concession to the future, we're ready to handle being
 +       * called both early and late (i.e. after bus_add_device). Once all
 +       * the platform bus code is reworked to call us late and the notifier
 +       * junk above goes away, move the body of do_iommu_attach here.
 +       */
 +      group = iommu_group_get(dev);
 +      if (group) {
 +              do_iommu_attach(dev, ops, dma_base, size);
 +              iommu_group_put(group);
 +      } else {
 +              queue_iommu_attach(dev, ops, dma_base, size);
 +      }
 +}
 +
 +void arch_teardown_dma_ops(struct device *dev)
 +{
 +      struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 +
 +      if (domain) {
 +              iommu_detach_device(domain, dev);
 +              if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
 +                      iommu_domain_free(domain);
 +      }
 +
 +      dev->archdata.dma_ops = NULL;
 +}
 +
 +#else
 +
 +static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 +                                struct iommu_ops *iommu)
 +{ }
 +
 +#endif  /* CONFIG_IOMMU_DMA */
 +
 +void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 +                      struct iommu_ops *iommu, bool coherent)
 +{
 +      if (!acpi_disabled && !dev->archdata.dma_ops)
 +              dev->archdata.dma_ops = dma_ops;
 +
 +      dev->archdata.dma_coherent = coherent;
 +      __iommu_setup_dma_ops(dev, dma_base, size, iommu);
 +}
Simple merge
index 3245f2d96d4f59e5140348b8c4dddbe836c5dda6,3c3a45632a953d30b21aa9c703819605b4b5143f..21d961bbac0e1e284b6f9cdb76cb472c8be48227
@@@ -471,22 -465,7 +465,15 @@@ static inline int pmd_trans_huge(pmd_t 
        return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
  }
  
- static inline int pmd_trans_splitting(pmd_t pmd)
- {
-       if (pmd_trans_huge(pmd))
-               return pmd_val(pmd) & _PAGE_SPLITTING;
-       return 0;
- }
  extern int has_transparent_hugepage(void);
 +#else
 +static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
 +                                        unsigned long addr, pmd_t *pmdp,
 +                                        unsigned long old_pmd)
 +{
 +
 +      WARN(1, "%s called with THP disabled\n", __func__);
 +}
  #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  
  static inline int pmd_large(pmd_t pmd)
Simple merge
Simple merge
index 1c65ef92768dbb553563506373094338ad92e7f9,13b9bcf5485e5f70f340f98c72a51f6e2af4b62b..610f472f91d14c25cef49a118ecc7f1d9eaf73b1
@@@ -1037,10 -999,10 +1037,10 @@@ int fsl_pci_mcheck_exception(struct pt_
                        ret = get_user(regs->nip, &inst);
                        pagefault_enable();
                } else {
-                       ret = probe_kernel_address(regs->nip, inst);
+                       ret = probe_kernel_address((void *)regs->nip, inst);
                }
  
 -              if (mcheck_handle_load(regs, inst)) {
 +              if (!ret && mcheck_handle_load(regs, inst)) {
                        regs->nip += 4;
                        return 1;
                }
index 024f85f947aec50ea93c881e56a73ba3a5591d3c,5690abafe13ea240109441cd83e309b828a48796..64ead80912488b476e19a004eaf01924dbdc6b4c
@@@ -1424,8 -1364,7 +1417,7 @@@ static inline pmd_t pmd_modify(pmd_t pm
        if (pmd_large(pmd)) {
                pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
                        _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
-                       _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT |
-                       _SEGMENT_ENTRY_SOFT_DIRTY;
 -                      _SEGMENT_ENTRY_LARGE;
++                      _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
                pmd_val(pmd) |= massage_pgprot_pmd(newprot);
                if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
                        pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 89eec79658702a7e53712bc52178dae25bddcc22,0391206868e9a81dca0add4a72fc065c3b66d5c0..5dd1f54d793549e50180b0e4840f8667536351ad
@@@ -630,40 -629,6 +630,40 @@@ struct request_queue *blk_alloc_queue(g
  }
  EXPORT_SYMBOL(blk_alloc_queue);
  
-               if (!(gfp & __GFP_WAIT))
 +int blk_queue_enter(struct request_queue *q, gfp_t gfp)
 +{
 +      while (true) {
 +              int ret;
 +
 +              if (percpu_ref_tryget_live(&q->q_usage_counter))
 +                      return 0;
 +
++              if (!gfpflags_allow_blocking(gfp))
 +                      return -EBUSY;
 +
 +              ret = wait_event_interruptible(q->mq_freeze_wq,
 +                              !atomic_read(&q->mq_freeze_depth) ||
 +                              blk_queue_dying(q));
 +              if (blk_queue_dying(q))
 +                      return -ENODEV;
 +              if (ret)
 +                      return ret;
 +      }
 +}
 +
 +void blk_queue_exit(struct request_queue *q)
 +{
 +      percpu_ref_put(&q->q_usage_counter);
 +}
 +
 +static void blk_queue_usage_counter_release(struct percpu_ref *ref)
 +{
 +      struct request_queue *q =
 +              container_of(ref, struct request_queue, q_usage_counter);
 +
 +      wake_up_all(&q->mq_freeze_wq);
 +}
 +
  struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
  {
        struct request_queue *q;
@@@ -2038,19 -1966,9 +2038,19 @@@ void generic_make_request(struct bio *b
        do {
                struct request_queue *q = bdev_get_queue(bio->bi_bdev);
  
-               if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
 -              q->make_request_fn(q, bio);
++              if (likely(blk_queue_enter(q, ___GFP_DIRECT_RECLAIM) == 0)) {
 +
 +                      q->make_request_fn(q, bio);
 +
 +                      blk_queue_exit(q);
  
 -              bio = bio_list_pop(current->bio_list);
 +                      bio = bio_list_pop(current->bio_list);
 +              } else {
 +                      struct bio *bio_next = bio_list_pop(current->bio_list);
 +
 +                      bio_io_error(bio);
 +                      bio = bio_next;
 +              }
        } while (bio);
        current->bio_list = NULL; /* deactivate */
  }
Simple merge
diff --cc block/blk-mq.c
Simple merge
diff --cc block/genhd.c
Simple merge
Simple merge
index 1b87623381e2b1183b5c9d57c870b7c10924f65e,214de17d0659f54f21e2b3034a46d690f821a063..93b3f99b6865fe721f7124412553cadf3c328e7a
@@@ -439,14 -429,10 +439,12 @@@ static int nbd_thread_recv(struct nbd_d
  
        device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
  
 +      spin_lock_irqsave(&nbd->tasks_lock, flags);
        nbd->task_recv = NULL;
 +      spin_unlock_irqrestore(&nbd->tasks_lock, flags);
  
        if (signal_pending(current)) {
-               siginfo_t info;
-               ret = dequeue_signal_lock(current, &current->blocked, &info);
+               ret = kernel_dequeue_signal(NULL);
                dev_warn(nbd_to_dev(nbd), "pid %d, %s, got signal %d\n",
                         task_pid_nr(current), current->comm, ret);
                mutex_lock(&nbd->tx_lock);
@@@ -587,15 -567,7 +582,13 @@@ static int nbd_thread_send(void *data
                nbd_handle_req(nbd, req);
        }
  
 +      spin_lock_irqsave(&nbd->tasks_lock, flags);
        nbd->task_send = NULL;
-       if (signal_pending(current)) {
-               siginfo_t info;
-               dequeue_signal_lock(current, &current->blocked, &info);
-       }
 +      spin_unlock_irqrestore(&nbd->tasks_lock, flags);
 +
 +      /* Clear maybe pending signals */
++      if (signal_pending(current))
++              kernel_dequeue_signal(NULL);
  
        return 0;
  }
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 0a94895a358d47e8e51cf32722afbdce0d6104d9,67b3b9d9dfd13c471ac31908737a0d30e9284a78..692ccc69345e4a9998246a53b6af817a56a5d435
@@@ -2244,8 -2244,9 +2244,8 @@@ static int u132_urb_enqueue(struct usb_
  {
        struct u132 *u132 = hcd_to_u132(hcd);
        if (irqs_disabled()) {
-               if (__GFP_WAIT & mem_flags) {
+               if (gfpflags_allow_blocking(mem_flags)) {
 -                      printk(KERN_ERR "invalid context for function that migh"
 -                              "t sleep\n");
 +                      printk(KERN_ERR "invalid context for function that might sleep\n");
                        return -EINVAL;
                }
        }
index f23fd86697ea5ed4234ff96a8dd236b8884e6026,6b747394f6f566dfbd7e2608d1690f88f81e4206..7bf835f85bc822ef1119b639be82619af066d326
@@@ -231,7 -231,8 +231,8 @@@ out_unlock
        if (res < 0 && fl->fl_type != F_UNLCK) {
                fl_type = fl->fl_type;
                fl->fl_type = F_UNLCK;
-               res = locks_lock_file_wait(filp, fl);
+               /* Even if this fails we want to return the remote error */
 -              posix_lock_file_wait(filp, fl);
++              locks_lock_file_wait(filp, fl);
                fl->fl_type = fl_type;
        }
  out:
diff --cc fs/cifs/file.c
Simple merge
diff --cc fs/coredump.c
Simple merge
diff --cc fs/direct-io.c
Simple merge
diff --cc fs/ext4/inode.c
Simple merge
Simple merge
diff --cc fs/ext4/super.c
Simple merge
Simple merge
diff --cc fs/jffs2/wbuf.c
Simple merge
diff --cc fs/mpage.c
Simple merge
diff --cc fs/namei.c
Simple merge
diff --cc fs/nfs/file.c
Simple merge
Simple merge
diff --cc fs/proc/array.c
Simple merge
index b029d426c55892544afcd3bf2b8a5965f6e0e5ee,c00cb0ae24f7de274fc4bc4485156b93af395b0d..9ca699b05e78906167519fa17ccb3acdbde510ec
@@@ -753,22 -807,30 +807,30 @@@ static inline void clear_soft_dirty(str
        pte_t ptent = *pte;
  
        if (pte_present(ptent)) {
+               ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
                ptent = pte_wrprotect(ptent);
 -              ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
 +              ptent = pte_clear_soft_dirty(ptent);
+               ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
        } else if (is_swap_pte(ptent)) {
                ptent = pte_swp_clear_soft_dirty(ptent);
+               set_pte_at(vma->vm_mm, addr, pte, ptent);
        }
-       set_pte_at(vma->vm_mm, addr, pte, ptent);
  }
+ #else
+ static inline void clear_soft_dirty(struct vm_area_struct *vma,
+               unsigned long addr, pte_t *pte)
+ {
+ }
+ #endif
  
+ #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
  static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmdp)
  {
-       pmd_t pmd = *pmdp;
+       pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
  
        pmd = pmd_wrprotect(pmd);
 -      pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
 +      pmd = pmd_clear_soft_dirty(pmd);
  
        if (vma->vm_flags & VM_SOFTDIRTY)
                vma->vm_flags &= ~VM_SOFTDIRTY;
diff --cc fs/xfs/xfs_qm.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc kernel/audit.c
Simple merge
diff --cc kernel/cgroup.c
Simple merge
diff --cc kernel/cpuset.c
Simple merge
diff --cc kernel/fork.c
Simple merge
diff --cc kernel/futex.c
Simple merge
Simple merge
diff --cc kernel/params.c
Simple merge
diff --cc kernel/sysctl.c
Simple merge
Simple merge
diff --cc lib/Makefile
Simple merge
diff --cc lib/dma-debug.c
Simple merge
diff --cc lib/kobject.c
Simple merge
Simple merge
diff --cc mm/failslab.c
index 98fb490311eb94386aebd2f4ceb77c729f4fa01e,35c876c82b9dc2f02cf22a5bc0af899d022bc742..79171b4a58269986491198403a322d6c2a7dc814
@@@ -3,12 -3,12 +3,12 @@@
  
  static struct {
        struct fault_attr attr;
-       bool ignore_gfp_wait;
 -      u32 ignore_gfp_reclaim;
 -      int cache_filter;
++      bool ignore_gfp_reclaim;
 +      bool cache_filter;
  } failslab = {
        .attr = FAULT_ATTR_INITIALIZER,
-       .ignore_gfp_wait = true,
 -      .ignore_gfp_reclaim = 1,
 -      .cache_filter = 0,
++      .ignore_gfp_reclaim = true,
 +      .cache_filter = false,
  };
  
  bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
Simple merge
diff --cc mm/memcontrol.c
Simple merge
Simple merge
diff --cc mm/page_alloc.c
index 805bbad2e24e1a84b383ebc90fc825527238957a,cef70104614c05ccef23f2020b4ea26d78f4c08c..d0499fff8c7fb1ee2f33a34bd9e424420939a654
@@@ -2159,13 -2297,13 +2297,13 @@@ failed
  static struct {
        struct fault_attr attr;
  
 -      u32 ignore_gfp_highmem;
 -      u32 ignore_gfp_reclaim;
 +      bool ignore_gfp_highmem;
-       bool ignore_gfp_wait;
++      bool ignore_gfp_reclaim;
        u32 min_order;
  } fail_page_alloc = {
        .attr = FAULT_ATTR_INITIALIZER,
-       .ignore_gfp_wait = true,
 -      .ignore_gfp_reclaim = 1,
 -      .ignore_gfp_highmem = 1,
++      .ignore_gfp_reclaim = true,
 +      .ignore_gfp_highmem = true,
        .min_order = 1,
  };
  
index 7d3db0247983b22b121290c2203ba2c2fb544ec0,89b150f8c920f200310641ed6a70e56150d06a6c..69261d4c774dd3d9894447bdbf6342aac746ad34
@@@ -134,24 -128,14 +134,12 @@@ pmd_t pmdp_huge_clear_flush(struct vm_a
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(!pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
 -      flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 +      flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
  }
 -#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
  #endif
  
- #ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
- void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-                         pmd_t *pmdp)
- {
-       pmd_t pmd = pmd_mksplitting(*pmdp);
-       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-       set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-       /* tlb flush only to serialize against gup-fast */
-       flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
- }
- #endif
  #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
 -#ifdef CONFIG_TRANSPARENT_HUGEPAGE
  void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
  {
diff --cc mm/vmscan.c
Simple merge
diff --cc net/core/sock.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge