git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - drivers/irqchip/irq-gic.c
Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
[karo-tx-linux.git] / drivers / irqchip / irq-gic.c
index 4dd88264dff55c0c95efd813c82fe762d7216263..e6b7ed537952949eedb43105bcdd555209b62711 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/irqchip/arm-gic-acpi.h>
@@ -46,9 +47,9 @@
 #include <asm/irq.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
+#include <asm/virt.h>
 
 #include "irq-gic-common.h"
-#include "irqchip.h"
 
 union gic_base {
        void __iomem *common_base;
@@ -82,6 +83,8 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 #define NR_GIC_CPU_IF 8
 static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
 
+static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
+
 #ifndef MAX_GIC_NR
 #define MAX_GIC_NR     1
 #endif
@@ -137,6 +140,36 @@ static inline unsigned int gic_irq(struct irq_data *d)
        return d->hwirq;
 }
 
+static inline bool cascading_gic_irq(struct irq_data *d)
+{
+       void *data = irq_data_get_irq_handler_data(d);
+
+       /*
+        * If handler_data is pointing to one of the secondary GICs, then
+        * this is a cascading interrupt, and it cannot possibly be
+        * forwarded.
+        */
+       if (data >= (void *)(gic_data + 1) &&
+           data <  (void *)(gic_data + MAX_GIC_NR))
+               return true;
+
+       return false;
+}
+
+static inline bool forwarded_irq(struct irq_data *d)
+{
+       /*
+        * A forwarded interrupt:
+        * - is on the primary GIC
+        * - has its handler_data set to a value
+        * - that isn't a secondary GIC
+        */
+       if (d->handler_data && !cascading_gic_irq(d))
+               return true;
+
+       return false;
+}
+
 /*
  * Routines to acknowledge, disable and enable interrupts
  */
@@ -157,6 +190,21 @@ static void gic_mask_irq(struct irq_data *d)
        gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
 }
 
+static void gic_eoimode1_mask_irq(struct irq_data *d)
+{
+       gic_mask_irq(d);
+       /*
+        * When masking a forwarded interrupt, make sure it is
+        * deactivated as well.
+        *
+        * This ensures that an interrupt that is getting
+        * disabled/masked will not get "stuck", because there is
+        * no one to deactivate it (guest is being terminated).
+        */
+       if (forwarded_irq(d))
+               gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
+}
+
 static void gic_unmask_irq(struct irq_data *d)
 {
        gic_poke_irq(d, GIC_DIST_ENABLE_SET);
@@ -167,6 +215,15 @@ static void gic_eoi_irq(struct irq_data *d)
        writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 }
 
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+       /* Do not deactivate an IRQ forwarded to a vcpu. */
+       if (forwarded_irq(d))
+               return;
+
+       writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
+}
+
 static int gic_irq_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool val)
 {
@@ -233,6 +290,16 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
        return gic_configure_irq(gicirq, type, base, NULL);
 }
 
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+       /* Only interrupts on the primary GIC can be forwarded to a vcpu. */
+       if (cascading_gic_irq(d))
+               return -EINVAL;
+
+       d->handler_data = vcpu;
+       return 0;
+}
+
 #ifdef CONFIG_SMP
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
@@ -272,11 +339,15 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
                irqnr = irqstat & GICC_IAR_INT_ID_MASK;
 
                if (likely(irqnr > 15 && irqnr < 1021)) {
+                       if (static_key_true(&supports_deactivate))
+                               writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
                        handle_domain_irq(gic->domain, irqnr, regs);
                        continue;
                }
                if (irqnr < 16) {
                        writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+                       if (static_key_true(&supports_deactivate))
+                               writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
 #ifdef CONFIG_SMP
                        handle_IPI(irqnr, regs);
 #endif
@@ -288,8 +359,8 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 
 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
 {
-       struct gic_chip_data *chip_data = irq_get_handler_data(irq);
-       struct irq_chip *chip = irq_get_chip(irq);
+       struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
+       struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned int cascade_irq, gic_irq;
        unsigned long status;
 
@@ -324,16 +395,34 @@ static struct irq_chip gic_chip = {
 #endif
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
-       .flags                  = IRQCHIP_SET_TYPE_MASKED,
+       .flags                  = IRQCHIP_SET_TYPE_MASKED |
+                                 IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static struct irq_chip gic_eoimode1_chip = {
+       .name                   = "GICv2",
+       .irq_mask               = gic_eoimode1_mask_irq,
+       .irq_unmask             = gic_unmask_irq,
+       .irq_eoi                = gic_eoimode1_eoi_irq,
+       .irq_set_type           = gic_set_type,
+#ifdef CONFIG_SMP
+       .irq_set_affinity       = gic_set_affinity,
+#endif
+       .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
+       .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
+       .irq_set_vcpu_affinity  = gic_irq_set_vcpu_affinity,
+       .flags                  = IRQCHIP_SET_TYPE_MASKED |
+                                 IRQCHIP_SKIP_SET_WAKE |
+                                 IRQCHIP_MASK_ON_SUSPEND,
 };
 
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
 {
        if (gic_nr >= MAX_GIC_NR)
                BUG();
-       if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
-               BUG();
-       irq_set_chained_handler(irq, gic_handle_cascade_irq);
+       irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
+                                        &gic_data[gic_nr]);
 }
 
 static u8 gic_get_cpumask(struct gic_chip_data *gic)
@@ -355,10 +444,14 @@ static u8 gic_get_cpumask(struct gic_chip_data *gic)
        return mask;
 }
 
-static void gic_cpu_if_up(void)
+static void gic_cpu_if_up(struct gic_chip_data *gic)
 {
-       void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+       void __iomem *cpu_base = gic_data_cpu_base(gic);
        u32 bypass = 0;
+       u32 mode = 0;
+
+       if (static_key_true(&supports_deactivate))
+               mode = GIC_CPU_CTRL_EOImodeNS;
 
        /*
        * Preserve bypass disable bits to be written back later
@@ -366,7 +459,7 @@ static void gic_cpu_if_up(void)
        bypass = readl(cpu_base + GIC_CPU_CTRL);
        bypass &= GICC_DIS_BYPASS_MASK;
 
-       writel_relaxed(bypass | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
+       writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
 }
 
 
@@ -401,34 +494,47 @@ static void gic_cpu_init(struct gic_chip_data *gic)
        int i;
 
        /*
-        * Get what the GIC says our CPU mask is.
+        * Setting up the CPU map is only relevant for the primary GIC
+        * because any nested/secondary GICs do not directly interface
+        * with the CPU(s).
         */
-       BUG_ON(cpu >= NR_GIC_CPU_IF);
-       cpu_mask = gic_get_cpumask(gic);
-       gic_cpu_map[cpu] = cpu_mask;
+       if (gic == &gic_data[0]) {
+               /*
+                * Get what the GIC says our CPU mask is.
+                */
+               BUG_ON(cpu >= NR_GIC_CPU_IF);
+               cpu_mask = gic_get_cpumask(gic);
+               gic_cpu_map[cpu] = cpu_mask;
 
-       /*
-        * Clear our mask from the other map entries in case they're
-        * still undefined.
-        */
-       for (i = 0; i < NR_GIC_CPU_IF; i++)
-               if (i != cpu)
-                       gic_cpu_map[i] &= ~cpu_mask;
+               /*
+                * Clear our mask from the other map entries in case they're
+                * still undefined.
+                */
+               for (i = 0; i < NR_GIC_CPU_IF; i++)
+                       if (i != cpu)
+                               gic_cpu_map[i] &= ~cpu_mask;
+       }
 
        gic_cpu_config(dist_base, NULL);
 
        writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
-       gic_cpu_if_up();
+       gic_cpu_if_up(gic);
 }
 
-void gic_cpu_if_down(void)
+int gic_cpu_if_down(unsigned int gic_nr)
 {
-       void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
+       void __iomem *cpu_base;
        u32 val = 0;
 
+       if (gic_nr >= MAX_GIC_NR)
+               return -EINVAL;
+
+       cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
        val = readl(cpu_base + GIC_CPU_CTRL);
        val &= ~GICC_ENABLE;
        writel_relaxed(val, cpu_base + GIC_CPU_CTRL);
+
+       return 0;
 }
 
 #ifdef CONFIG_CPU_PM
@@ -564,7 +670,7 @@ static void gic_cpu_restore(unsigned int gic_nr)
                                        dist_base + GIC_DIST_PRI + i * 4);
 
        writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
-       gic_cpu_if_up();
+       gic_cpu_if_up(&gic_data[gic_nr]);
 }
 
 static int gic_notifier(struct notifier_block *self, unsigned long cmd,        void *v)
@@ -789,13 +895,20 @@ void __init gic_init_physaddr(struct device_node *node)
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                                irq_hw_number_t hw)
 {
+       struct irq_chip *chip = &gic_chip;
+
+       if (static_key_true(&supports_deactivate)) {
+               if (d->host_data == (void *)&gic_data[0])
+                       chip = &gic_eoimode1_chip;
+       }
+
        if (hw < 32) {
                irq_set_percpu_devid(irq);
-               irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+               irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
                set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
        } else {
-               irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+               irq_domain_set_info(d, irq, hw, chip, d->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }
@@ -880,12 +993,7 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
        .xlate = gic_irq_domain_xlate,
 };
 
-void gic_set_irqchip_flags(unsigned long flags)
-{
-       gic_chip.flags |= flags;
-}
-
-void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
                           void __iomem *dist_base, void __iomem *cpu_base,
                           u32 percpu_offset, struct device_node *node)
 {
@@ -929,13 +1037,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                gic_set_base_accessor(gic, gic_get_common_base);
        }
 
-       /*
-        * Initialize the CPU interface map to all CPUs.
-        * It will be refined as each CPU probes its ID.
-        */
-       for (i = 0; i < NR_GIC_CPU_IF; i++)
-               gic_cpu_map[i] = 0xff;
-
        /*
         * Find out how many interrupts are supported.
         * The GIC only supports up to 1020 interrupt sources.
@@ -981,11 +1082,20 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                return;
 
        if (gic_nr == 0) {
+               /*
+                * Initialize the CPU interface map to all CPUs.
+                * It will be refined as each CPU probes its ID.
+                * This is only necessary for the primary GIC.
+                */
+               for (i = 0; i < NR_GIC_CPU_IF; i++)
+                       gic_cpu_map[i] = 0xff;
 #ifdef CONFIG_SMP
                set_smp_cross_call(gic_raise_softirq);
                register_cpu_notifier(&gic_cpu_notifier);
 #endif
                set_handle_irq(gic_handle_irq);
+               if (static_key_true(&supports_deactivate))
+                       pr_info("GIC: Using split EOI/Deactivate mode\n");
        }
 
        gic_dist_init(gic);
@@ -993,6 +1103,19 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
        gic_pm_init(gic);
 }
 
+void __init gic_init_bases(unsigned int gic_nr, int irq_start,
+                          void __iomem *dist_base, void __iomem *cpu_base,
+                          u32 percpu_offset, struct device_node *node)
+{
+       /*
+        * Non-DT/ACPI systems won't run a hypervisor, so let's not
+        * bother with these...
+        */
+       static_key_slow_dec(&supports_deactivate);
+       __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base,
+                        percpu_offset, node);
+}
+
 #ifdef CONFIG_OF
 static int gic_cnt __initdata;
 
@@ -1001,6 +1124,7 @@ gic_of_init(struct device_node *node, struct device_node *parent)
 {
        void __iomem *cpu_base;
        void __iomem *dist_base;
+       struct resource cpu_res;
        u32 percpu_offset;
        int irq;
 
@@ -1013,10 +1137,20 @@ gic_of_init(struct device_node *node, struct device_node *parent)
        cpu_base = of_iomap(node, 1);
        WARN(!cpu_base, "unable to map gic cpu registers\n");
 
+       of_address_to_resource(node, 1, &cpu_res);
+
+       /*
+        * Disable split EOI/Deactivate if either HYP is not available
+        * or the CPU interface is too small.
+        */
+       if (gic_cnt == 0 && (!is_hyp_mode_available() ||
+                            resource_size(&cpu_res) < SZ_8K))
+               static_key_slow_dec(&supports_deactivate);
+
        if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
                percpu_offset = 0;
 
-       gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+       __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
        if (!gic_cnt)
                gic_init_physaddr(node);
 
@@ -1131,12 +1265,20 @@ gic_v2_acpi_init(struct acpi_table_header *table)
                return -ENOMEM;
        }
 
+       /*
+        * Disable split EOI/Deactivate if HYP is not available. ACPI
+        * guarantees that we'll always have a GICv2, so the CPU
+        * interface will always be the right size.
+        */
+       if (!is_hyp_mode_available())
+               static_key_slow_dec(&supports_deactivate);
+
        /*
         * Initialize zero GIC instance (no multi-GIC support). Also, set GIC
         * as default IRQ domain to allow for GSI registration and GSI to IRQ
         * number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
         */
-       gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
+       __gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
        irq_set_default_host(gic_data[0].domain);
 
        acpi_irq_model = ACPI_IRQ_MODEL_GIC;