]> git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 3 Nov 2015 22:40:01 +0000 (14:40 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 3 Nov 2015 22:40:01 +0000 (14:40 -0800)
Pull irq updates from Thomas Gleixner:
 "The irq department delivers:

   - Rework the irqdomain core infrastructure to accommodate ACPI based
     systems.  This is required to support ARM64 without creating
     artificial device tree nodes.

   - Sanitize the ACPI based ARM GIC initialization by making use of the
     new firmware independent irqdomain core

   - Further improvements to the generic MSI management

   - Generalize the irq migration on CPU hotplug

   - Improvements to the threaded interrupt infrastructure

   - Allow the migration of "chained" low level interrupt handlers

   - Allow optional force masking of interrupts in disable_irq[_nosync]

   - Support for two new interrupt chips - Sigh!

   - A larger set of errata fixes for ARM gicv3

   - The usual pile of fixes, updates, improvements and cleanups all
     over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (71 commits)
  Document that IRQ_NONE should be returned when IRQ not actually handled
  PCI/MSI: Allow the MSI domain to be device-specific
  PCI: Add per-device MSI domain hook
  of/irq: Use the msi-map property to provide device-specific MSI domain
  of/irq: Split of_msi_map_rid to reuse msi-map lookup
  irqchip/gic-v3-its: Parse new version of msi-parent property
  PCI/MSI: Use of_msi_get_domain instead of open-coded "msi-parent" parsing
  of/irq: Use of_msi_get_domain instead of open-coded "msi-parent" parsing
  of/irq: Add support code for multi-parent version of "msi-parent"
  irqchip/gic-v3-its: Add handling of PCI requester id.
  PCI/MSI: Add helper function pci_msi_domain_get_msi_rid().
  of/irq: Add new function of_msi_map_rid()
  Docs: dt: Add PCI MSI map bindings
  irqchip/gic-v2m: Add support for multiple MSI frames
  irqchip/gic-v3: Fix translation of LPIs after conversion to irq_fwspec
  irqchip/mxs: Add Alphascale ASM9260 support
  irqchip/mxs: Prepare driver for hardware with different offsets
  irqchip/mxs: Panic if ioremap or domain creation fails
  irqdomain: Documentation updates
  irqdomain/msi: Use fwnode instead of of_node
  ...

1  2 
arch/arm/Kconfig
drivers/irqchip/irq-tegra.c
drivers/pci/msi.c
kernel/irq/msi.c
virt/kvm/arm/vgic.c

diff --combined arch/arm/Kconfig
index 823f90ea65c4458aa9e0f4096399e6c436cd8873,0d72535ed01dbbc37f9b558d3752cbb865a76de0..f1ed1109f4889e006e9df4c6110be001841c0f82
@@@ -645,7 -645,6 +645,7 @@@ config ARCH_SHMOBILE_LEGAC
  
  config ARCH_RPC
        bool "RiscPC"
 +      depends on MMU
        select ARCH_ACORN
        select ARCH_MAY_HAVE_PC_FDC
        select ARCH_SPARSEMEM_ENABLE
@@@ -820,6 -819,7 +820,7 @@@ config ARCH_VIR
        bool "Dummy Virtual Machine" if ARCH_MULTI_V7
        select ARM_AMBA
        select ARM_GIC
+       select ARM_GIC_V3
        select ARM_PSCI
        select HAVE_ARM_ARCH_TIMER
  
@@@ -1411,6 -1411,7 +1412,6 @@@ config HAVE_ARM_ARCH_TIME
  
  config HAVE_ARM_TWD
        bool
 -      depends on SMP
        select CLKSRC_OF if OF
        help
          This options enables support for the ARM timer and watchdog unit
@@@ -1470,8 -1471,6 +1471,8 @@@ choic
  
        config VMSPLIT_3G
                bool "3G/1G user/kernel split"
 +      config VMSPLIT_3G_OPT
 +              bool "3G/1G user/kernel split (for full 1G low memory)"
        config VMSPLIT_2G
                bool "2G/2G user/kernel split"
        config VMSPLIT_1G
@@@ -1483,7 -1482,6 +1484,7 @@@ config PAGE_OFFSE
        default PHYS_OFFSET if !MMU
        default 0x40000000 if VMSPLIT_1G
        default 0x80000000 if VMSPLIT_2G
 +      default 0xB0000000 if VMSPLIT_3G_OPT
        default 0xC0000000
  
  config NR_CPUS
@@@ -1698,9 -1696,8 +1699,9 @@@ config HIGHME
          If unsure, say n.
  
  config HIGHPTE
 -      bool "Allocate 2nd-level pagetables from highmem"
 +      bool "Allocate 2nd-level pagetables from highmem" if EXPERT
        depends on HIGHMEM
 +      default y
        help
          The VM uses one page of physical memory for each page table.
          For systems with a lot of processes, this can use a lot of
index fd88e687791aa8cc5a4249f550b2caa161eef46b,557e15e57e636f1a725c9b29279a2981a9e9adbe..121ec301372e69cbeca0bff90f41fc0653f0012a
@@@ -214,48 -214,49 +214,50 @@@ static struct irq_chip tegra_ictlr_chi
        .irq_unmask             = tegra_unmask,
        .irq_retrigger          = tegra_retrigger,
        .irq_set_wake           = tegra_set_wake,
 +      .irq_set_type           = irq_chip_set_type_parent,
        .flags                  = IRQCHIP_MASK_ON_SUSPEND,
  #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
  #endif
  };
  
- static int tegra_ictlr_domain_xlate(struct irq_domain *domain,
-                                   struct device_node *controller,
-                                   const u32 *intspec,
-                                   unsigned int intsize,
-                                   unsigned long *out_hwirq,
-                                   unsigned int *out_type)
+ static int tegra_ictlr_domain_translate(struct irq_domain *d,
+                                       struct irq_fwspec *fwspec,
+                                       unsigned long *hwirq,
+                                       unsigned int *type)
  {
-       if (domain->of_node != controller)
-               return -EINVAL; /* Shouldn't happen, really... */
-       if (intsize != 3)
-               return -EINVAL; /* Not GIC compliant */
-       if (intspec[0] != GIC_SPI)
-               return -EINVAL; /* No PPI should point to this domain */
+       if (is_of_node(fwspec->fwnode)) {
+               if (fwspec->param_count != 3)
+                       return -EINVAL;
  
-       *out_hwirq = intspec[1];
-       *out_type = intspec[2];
-       return 0;
+               /* No PPI should point to this domain */
+               if (fwspec->param[0] != 0)
+                       return -EINVAL;
+               *hwirq = fwspec->param[1];
+               *type = fwspec->param[2];
+               return 0;
+       }
+       return -EINVAL;
  }
  
  static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
                                    unsigned int virq,
                                    unsigned int nr_irqs, void *data)
  {
-       struct of_phandle_args *args = data;
-       struct of_phandle_args parent_args;
+       struct irq_fwspec *fwspec = data;
+       struct irq_fwspec parent_fwspec;
        struct tegra_ictlr_info *info = domain->host_data;
        irq_hw_number_t hwirq;
        unsigned int i;
  
-       if (args->args_count != 3)
+       if (fwspec->param_count != 3)
                return -EINVAL; /* Not GIC compliant */
-       if (args->args[0] != GIC_SPI)
+       if (fwspec->param[0] != GIC_SPI)
                return -EINVAL; /* No PPI should point to this domain */
  
-       hwirq = args->args[1];
+       hwirq = fwspec->param[1];
        if (hwirq >= (num_ictlrs * 32))
                return -EINVAL;
  
                                              info->base[ictlr]);
        }
  
-       parent_args = *args;
-       parent_args.np = domain->parent->of_node;
-       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args);
+       parent_fwspec = *fwspec;
+       parent_fwspec.fwnode = domain->parent->fwnode;
+       return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+                                           &parent_fwspec);
  }
  
  static void tegra_ictlr_domain_free(struct irq_domain *domain,
  }
  
  static const struct irq_domain_ops tegra_ictlr_domain_ops = {
-       .xlate  = tegra_ictlr_domain_xlate,
-       .alloc  = tegra_ictlr_domain_alloc,
-       .free   = tegra_ictlr_domain_free,
+       .translate      = tegra_ictlr_domain_translate,
+       .alloc          = tegra_ictlr_domain_alloc,
+       .free           = tegra_ictlr_domain_free,
  };
  
  static int __init tegra_ictlr_init(struct device_node *node,
diff --combined drivers/pci/msi.c
index 4a7da3c3e0353c3c746e9b10be9a093c8d085920,4cd6f3abcecf2d302561ee2646be0450e6e490cc..45a51486d080a54fa987eaa230106df5895b3b09
@@@ -20,6 -20,7 +20,7 @@@
  #include <linux/io.h>
  #include <linux/slab.h>
  #include <linux/irqdomain.h>
+ #include <linux/of_irq.h>
  
  #include "pci.h"
  
@@@ -1243,15 -1244,11 +1244,15 @@@ static void pci_msi_domain_update_chip_
        BUG_ON(!chip);
        if (!chip->irq_write_msi_msg)
                chip->irq_write_msi_msg = pci_msi_domain_write_msg;
 +      if (!chip->irq_mask)
 +              chip->irq_mask = pci_msi_mask_irq;
 +      if (!chip->irq_unmask)
 +              chip->irq_unmask = pci_msi_unmask_irq;
  }
  
  /**
-  * pci_msi_create_irq_domain - Creat a MSI interrupt domain
-  * @node:     Optional device-tree node of the interrupt controller
+  * pci_msi_create_irq_domain - Create a MSI interrupt domain
+  * @fwnode:   Optional fwnode of the interrupt controller
   * @info:     MSI domain info
   * @parent:   Parent irq domain
   *
   * Returns:
   * A domain pointer or NULL in case of failure.
   */
- struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
+ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
                                             struct msi_domain_info *info,
                                             struct irq_domain *parent)
  {
        if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
                pci_msi_domain_update_chip_ops(info);
  
-       domain = msi_create_irq_domain(node, info, parent);
+       domain = msi_create_irq_domain(fwnode, info, parent);
        if (!domain)
                return NULL;
  
@@@ -1307,14 -1304,14 +1308,14 @@@ void pci_msi_domain_free_irqs(struct ir
  
  /**
   * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
-  * @node:     Optional device-tree node of the interrupt controller
+  * @fwnode:   Optional fwnode of the interrupt controller
   * @info:     MSI domain info
   * @parent:   Parent irq domain
   *
   * Returns: A domain pointer or NULL in case of failure. If successful
   * the default PCI/MSI irqdomain pointer is updated.
   */
- struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
+ struct irq_domain *pci_msi_create_default_irq_domain(struct fwnode_handle *fwnode,
                struct msi_domain_info *info, struct irq_domain *parent)
  {
        struct irq_domain *domain;
                pr_err("PCI: default irq domain for PCI MSI has already been created.\n");
                domain = NULL;
        } else {
-               domain = pci_msi_create_irq_domain(node, info, parent);
+               domain = pci_msi_create_irq_domain(fwnode, info, parent);
                pci_msi_default_domain = domain;
        }
        mutex_unlock(&pci_msi_domain_lock);
  
        return domain;
  }
+ static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
+ {
+       u32 *pa = data;
+       *pa = alias;
+       return 0;
+ }
+ /**
+  * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
+  * @domain:   The interrupt domain
+  * @pdev:     The PCI device.
+  *
+  * The RID for a device is formed from the alias, with a firmware
+  * supplied mapping applied
+  *
+  * Returns: The RID.
+  */
+ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
+ {
+       struct device_node *of_node;
+       u32 rid = 0;
+       pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+       of_node = irq_domain_get_of_node(domain);
+       if (of_node)
+               rid = of_msi_map_rid(&pdev->dev, of_node, rid);
+       return rid;
+ }
+ /**
+  * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
+  * @pdev:     The PCI device
+  *
+  * Use the firmware data to find a device-specific MSI domain
+  * (i.e. not one that is set as a default).
+  *
+  * Returns: The corresponding MSI domain or NULL if none has been found.
+  */
+ struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+ {
+       u32 rid = 0;
+       pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+       return of_msi_map_get_device_domain(&pdev->dev, rid);
+ }
  #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
diff --combined kernel/irq/msi.c
index be9149f62eb86e63ac06194d4eeaa4065823ea62,95354bb07a14afb09265d69d2d51ac3eb1234eef..6b0c0b74a2a1a88c0d3f81519fea7c520b290967
@@@ -228,18 -228,22 +228,18 @@@ static void msi_domain_update_chip_ops(
  {
        struct irq_chip *chip = info->chip;
  
 -      BUG_ON(!chip);
 -      if (!chip->irq_mask)
 -              chip->irq_mask = pci_msi_mask_irq;
 -      if (!chip->irq_unmask)
 -              chip->irq_unmask = pci_msi_unmask_irq;
 +      BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
        if (!chip->irq_set_affinity)
                chip->irq_set_affinity = msi_domain_set_affinity;
  }
  
  /**
   * msi_create_irq_domain - Create a MSI interrupt domain
-  * @of_node:  Optional device-tree node of the interrupt controller
+  * @fwnode:   Optional fwnode of the interrupt controller
   * @info:     MSI domain info
   * @parent:   Parent irq domain
   */
- struct irq_domain *msi_create_irq_domain(struct device_node *node,
+ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
                                         struct msi_domain_info *info,
                                         struct irq_domain *parent)
  {
        if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
                msi_domain_update_chip_ops(info);
  
-       return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops,
-                                       info);
+       return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
+                                          &msi_domain_ops, info);
  }
  
  /**
diff --combined virt/kvm/arm/vgic.c
index 66c66165e712d743ed3da1a501c03a087f442378,77b0176b04841301ac9ec2801791f57b57741cf3..30489181922d28d9749feefb552e7b8f0fa97f52
@@@ -531,34 -531,6 +531,34 @@@ bool vgic_handle_set_pending_reg(struc
        return false;
  }
  
 +/*
 + * If a mapped interrupt's state has been modified by the guest such that it
 + * is no longer active or pending, without it having gone through the sync path,
 + * then the map->active field must be cleared so the interrupt can be taken
 + * again.
 + */
 +static void vgic_handle_clear_mapped_irq(struct kvm_vcpu *vcpu)
 +{
 +      struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 +      struct list_head *root;
 +      struct irq_phys_map_entry *entry;
 +      struct irq_phys_map *map;
 +
 +      rcu_read_lock();
 +
 +      /* Check for PPIs */
 +      root = &vgic_cpu->irq_phys_map_list;
 +      list_for_each_entry_rcu(entry, root, entry) {
 +              map = &entry->map;
 +
 +              if (!vgic_dist_irq_is_pending(vcpu, map->virt_irq) &&
 +                  !vgic_irq_is_active(vcpu, map->virt_irq))
 +                      map->active = false;
 +      }
 +
 +      rcu_read_unlock();
 +}
 +
  bool vgic_handle_clear_pending_reg(struct kvm *kvm,
                                   struct kvm_exit_mmio *mmio,
                                   phys_addr_t offset, int vcpu_id)
                                          vcpu_id, offset);
                vgic_reg_access(mmio, reg, offset, mode);
  
 +              vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
                vgic_update_state(kvm);
                return true;
        }
@@@ -627,7 -598,6 +627,7 @@@ bool vgic_handle_clear_active_reg(struc
                        ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
  
        if (mmio->is_write) {
 +              vgic_handle_clear_mapped_irq(kvm_get_vcpu(kvm, vcpu_id));
                vgic_update_state(kvm);
                return true;
        }
@@@ -1012,12 -982,6 +1012,12 @@@ static int compute_pending_for_cpu(stru
        pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
        pend_shared = vcpu->arch.vgic_cpu.pending_shared;
  
 +      if (!dist->enabled) {
 +              bitmap_zero(pend_percpu, VGIC_NR_PRIVATE_IRQS);
 +              bitmap_zero(pend_shared, nr_shared);
 +              return 0;
 +      }
 +
        pending = vgic_bitmap_get_cpu_map(&dist->irq_pending, vcpu_id);
        enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
        bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
@@@ -1045,6 -1009,11 +1045,6 @@@ void vgic_update_state(struct kvm *kvm
        struct kvm_vcpu *vcpu;
        int c;
  
 -      if (!dist->enabled) {
 -              set_bit(0, dist->irq_pending_on_cpu);
 -              return;
 -      }
 -
        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (compute_pending_for_cpu(vcpu))
                        set_bit(c, dist->irq_pending_on_cpu);
@@@ -1123,15 -1092,6 +1123,15 @@@ static void vgic_retire_lr(int lr_nr, i
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);
  
 +      /*
 +       * We must transfer the pending state back to the distributor before
 +       * retiring the LR, otherwise we may lose edge-triggered interrupts.
 +       */
 +      if (vlr.state & LR_STATE_PENDING) {
 +              vgic_dist_irq_set_pending(vcpu, irq);
 +              vlr.hwirq = 0;
 +      }
 +
        vlr.state = 0;
        vgic_set_lr(vcpu, lr_nr, vlr);
        clear_bit(lr_nr, vgic_cpu->lr_used);
@@@ -1172,8 -1132,7 +1172,8 @@@ static void vgic_queue_irq_to_lr(struc
                kvm_debug("Set active, clear distributor: 0x%x\n", vlr.state);
                vgic_irq_clear_active(vcpu, irq);
                vgic_update_state(vcpu->kvm);
 -      } else if (vgic_dist_irq_is_pending(vcpu, irq)) {
 +      } else {
 +              WARN_ON(!vgic_dist_irq_is_pending(vcpu, irq));
                vlr.state |= LR_STATE_PENDING;
                kvm_debug("Set pending: 0x%x\n", vlr.state);
        }
@@@ -1281,7 -1240,7 +1281,7 @@@ static void __kvm_vgic_flush_hwstate(st
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
        unsigned long *pa_percpu, *pa_shared;
 -      int i, vcpu_id, lr, ret;
 +      int i, vcpu_id;
        int overflow = 0;
        int nr_shared = vgic_nr_shared_irqs(dist);
  
@@@ -1336,6 -1295,31 +1336,6 @@@ epilog
                 */
                clear_bit(vcpu_id, dist->irq_pending_on_cpu);
        }
 -
 -      for (lr = 0; lr < vgic->nr_lr; lr++) {
 -              struct vgic_lr vlr;
 -
 -              if (!test_bit(lr, vgic_cpu->lr_used))
 -                      continue;
 -
 -              vlr = vgic_get_lr(vcpu, lr);
 -
 -              /*
 -               * If we have a mapping, and the virtual interrupt is
 -               * presented to the guest (as pending or active), then we must
 -               * set the state to active in the physical world. See
 -               * Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt.
 -               */
 -              if (vlr.state & LR_HW) {
 -                      struct irq_phys_map *map;
 -                      map = vgic_irq_map_search(vcpu, vlr.irq);
 -
 -                      ret = irq_set_irqchip_state(map->irq,
 -                                                  IRQCHIP_STATE_ACTIVE,
 -                                                  true);
 -                      WARN_ON(ret);
 -              }
 -      }
  }
  
  static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
@@@ -1437,7 -1421,7 +1437,7 @@@ static int vgic_sync_hwirq(struct kvm_v
                return 0;
  
        map = vgic_irq_map_search(vcpu, vlr.irq);
 -      BUG_ON(!map || !map->active);
 +      BUG_ON(!map);
  
        ret = irq_get_irqchip_state(map->irq,
                                    IRQCHIP_STATE_ACTIVE,
  
        WARN_ON(ret);
  
 -      if (map->active) {
 -              ret = irq_set_irqchip_state(map->irq,
 -                                          IRQCHIP_STATE_ACTIVE,
 -                                          false);
 -              WARN_ON(ret);
 +      if (map->active)
                return 0;
 -      }
  
        return 1;
  }
@@@ -1618,12 -1607,8 +1618,12 @@@ static int vgic_update_irq_pending(stru
        } else {
                if (level_triggered) {
                        vgic_dist_irq_clear_level(vcpu, irq_num);
 -                      if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
 +                      if (!vgic_dist_irq_soft_pend(vcpu, irq_num)) {
                                vgic_dist_irq_clear_pending(vcpu, irq_num);
 +                              vgic_cpu_irq_clear(vcpu, irq_num);
 +                              if (!compute_pending_for_cpu(vcpu))
 +                                      clear_bit(cpuid, dist->irq_pending_on_cpu);
 +                      }
                }
  
                ret = false;
@@@ -2137,7 -2122,7 +2137,7 @@@ static int init_vgic_model(struct kvm *
        case KVM_DEV_TYPE_ARM_VGIC_V2:
                vgic_v2_init_emulation(kvm);
                break;
- #ifdef CONFIG_ARM_GIC_V3
+ #ifdef CONFIG_KVM_ARM_VGIC_V3
        case KVM_DEV_TYPE_ARM_VGIC_V3:
                vgic_v3_init_emulation(kvm);
                break;
@@@ -2299,7 -2284,7 +2299,7 @@@ int kvm_vgic_addr(struct kvm *kvm, unsi
                block_size = KVM_VGIC_V2_CPU_SIZE;
                alignment = SZ_4K;
                break;
- #ifdef CONFIG_ARM_GIC_V3
+ #ifdef CONFIG_KVM_ARM_VGIC_V3
        case KVM_VGIC_V3_ADDR_TYPE_DIST:
                type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
                addr_ptr = &vgic->vgic_dist_base;