git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'tip/auto-latest'
author Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 2 Nov 2015 01:33:26 +0000 (12:33 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Mon, 2 Nov 2015 01:33:26 +0000 (12:33 +1100)
48 files changed:
Documentation/arm/uefi.txt
Documentation/arm64/booting.txt
Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
Documentation/kernel-parameters.txt
MAINTAINERS
arch/arm/Kconfig
arch/arm/mach-exynos/suspend.c
arch/arm/mach-imx/gpc.c
arch/arm/mach-omap2/omap-wakeupgen.c
arch/arm64/Kconfig
arch/arm64/include/asm/acpi.h
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/pgtable.h
arch/arm64/kernel/acpi.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/efi.c
arch/arm64/kernel/head.S
arch/arm64/kvm/Kconfig
arch/arm64/mm/proc.S
arch/mips/include/asm/atomic.h
arch/powerpc/sysdev/mpic.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/crypto/sha1_ssse3_glue.c
arch/x86/crypto/sha256_ssse3_glue.c
arch/x86/crypto/sha512_ssse3_glue.c
arch/x86/include/asm/pgtable.h
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/h8300_timer8.c
drivers/firmware/efi/Makefile
drivers/irqchip/irq-gic.c
drivers/net/ethernet/sfc/mcdi.c
drivers/of/irq.c
drivers/pci/msi.c
drivers/pci/probe.c
include/linux/acpi.h
include/linux/fwnode.h
include/linux/msi.h
include/linux/of_irq.h
include/linux/sched.h
include/uapi/linux/perf_event.h
kernel/events/core.c
kernel/irq/manage.c

index 7f1bed8872f3d73361f143d8131ff5d29fedb848,7b3fdfe0f7ba37a7ff6a0e46cfec18e1fcfbe68e..6543a0adea8a9741798570108719c5e968c25088
@@@ -58,5 -58,5 +58,3 @@@ linux,uefi-mmap-desc-size | 32-bit | Si
  --------------------------------------------------------------------------------
  linux,uefi-mmap-desc-ver  | 32-bit | Version of the mmap descriptor format.
  --------------------------------------------------------------------------------
- For verbose debug messages, specify 'uefi_debug' on the kernel command line.
 -linux,uefi-stub-kern-ver  | string | Copy of linux_banner from build.
 ---------------------------------------------------------------------------------
Simple merge
Simple merge
diff --cc MAINTAINERS
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 5e13ad76a2493ad1458943338cade910e77d979d,1e247ac2601af41012823f0a639e2f9393af6d0b..f3a3586a421c8869d6e88219c686491d71e2ecd5
  #define ATOMIC_INIT(i)        { (i) }
  
  #define atomic_read(v)                        READ_ONCE((v)->counter)
- #define atomic_set(v, i)              (((v)->counter) = (i))
+ #define atomic_set(v, i)              WRITE_ONCE(((v)->counter), (i))
 +
 +#define atomic_add_return_relaxed     atomic_add_return_relaxed
 +#define atomic_add_return_acquire     atomic_add_return_acquire
 +#define atomic_add_return_release     atomic_add_return_release
 +#define atomic_add_return             atomic_add_return
 +
 +#define atomic_inc_return_relaxed(v)  atomic_add_return_relaxed(1, (v))
 +#define atomic_inc_return_acquire(v)  atomic_add_return_acquire(1, (v))
 +#define atomic_inc_return_release(v)  atomic_add_return_release(1, (v))
 +#define atomic_inc_return(v)          atomic_add_return(1, (v))
 +
 +#define atomic_sub_return_relaxed     atomic_sub_return_relaxed
 +#define atomic_sub_return_acquire     atomic_sub_return_acquire
 +#define atomic_sub_return_release     atomic_sub_return_release
 +#define atomic_sub_return             atomic_sub_return
 +
 +#define atomic_dec_return_relaxed(v)  atomic_sub_return_relaxed(1, (v))
 +#define atomic_dec_return_acquire(v)  atomic_sub_return_acquire(1, (v))
 +#define atomic_dec_return_release(v)  atomic_sub_return_release(1, (v))
 +#define atomic_dec_return(v)          atomic_sub_return(1, (v))
 +
 +#define atomic_xchg_relaxed(v, new)   xchg_relaxed(&((v)->counter), (new))
 +#define atomic_xchg_acquire(v, new)   xchg_acquire(&((v)->counter), (new))
 +#define atomic_xchg_release(v, new)   xchg_release(&((v)->counter), (new))
  #define atomic_xchg(v, new)           xchg(&((v)->counter), (new))
 +
 +#define atomic_cmpxchg_relaxed(v, old, new)                           \
 +      cmpxchg_relaxed(&((v)->counter), (old), (new))
 +#define atomic_cmpxchg_acquire(v, old, new)                           \
 +      cmpxchg_acquire(&((v)->counter), (old), (new))
 +#define atomic_cmpxchg_release(v, old, new)                           \
 +      cmpxchg_release(&((v)->counter), (old), (new))
  #define atomic_cmpxchg(v, old, new)   cmpxchg(&((v)->counter), (old), (new))
  
  #define atomic_inc(v)                 atomic_add(1, (v))
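
The hunk above also switches atomic_set() from a plain assignment to WRITE_ONCE(). A minimal sketch of the property this buys, assuming a volatile store as the mechanism (the kernel's real WRITE_ONCE() is more elaborate):

/* Not the kernel's actual implementation: routing the store through a
 * volatile-qualified lvalue keeps the compiler from tearing, fusing,
 * or eliding the write, matching readers that use READ_ONCE(). */
#define WRITE_ONCE_SKETCH(x, val) \
        (*(volatile __typeof__(x) *)&(x) = (val))

struct atomic_sketch { int counter; };

static void atomic_set_sketch(struct atomic_sketch *v, int i)
{
        WRITE_ONCE_SKETCH(v->counter, i);
}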
Simple merge
index 31678b2f295f242fc03d461dd92f52de0388d9c9,100a3d1b17c854d6c1a2c465b56ad8f101ab5efa..1a5949364ed0f43eee2be4b61c3497fe4fdbbb7b
        (0xf                    << MIDR_ARCHITECTURE_SHIFT) | \
        ((partnum)              << MIDR_PARTNUM_SHIFT))
  
- #define ARM_CPU_IMP_ARM               0x41
- #define ARM_CPU_IMP_APM               0x50
+ #define ARM_CPU_IMP_ARM                       0x41
+ #define ARM_CPU_IMP_APM                       0x50
+ #define ARM_CPU_IMP_CAVIUM            0x43
  
- #define ARM_CPU_PART_AEM_V8   0xD0F
- #define ARM_CPU_PART_FOUNDATION       0xD00
- #define ARM_CPU_PART_CORTEX_A57       0xD07
- #define ARM_CPU_PART_CORTEX_A53       0xD03
+ #define ARM_CPU_PART_AEM_V8           0xD0F
+ #define ARM_CPU_PART_FOUNDATION               0xD00
+ #define ARM_CPU_PART_CORTEX_A57               0xD07
+ #define ARM_CPU_PART_CORTEX_A53               0xD03
  
- #define APM_CPU_PART_POTENZA  0x000
+ #define APM_CPU_PART_POTENZA          0x000
+ #define CAVIUM_CPU_PART_THUNDERX      0x0A1
  
 -#define ID_AA64MMFR0_BIGENDEL0_SHIFT  16
 -#define ID_AA64MMFR0_BIGENDEL0_MASK   (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
 -#define ID_AA64MMFR0_BIGENDEL0(mmfr0) \
 -      (((mmfr0) & ID_AA64MMFR0_BIGENDEL0_MASK) >> ID_AA64MMFR0_BIGENDEL0_SHIFT)
 -#define ID_AA64MMFR0_BIGEND_SHIFT     8
 -#define ID_AA64MMFR0_BIGEND_MASK      (0xf << ID_AA64MMFR0_BIGEND_SHIFT)
 -#define ID_AA64MMFR0_BIGEND(mmfr0)    \
 -      (((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
 -
  #ifndef __ASSEMBLY__
  
  /*
Simple merge
Simple merge
index d6463bba2360561e59acb1f0fa5dbac926f37dfa,137d537ddceb8001f15d9daa18c95631b561a085..d1ce8e2f98b99bcb1fba0bae25c08e8f851c4f4b
@@@ -205,3 -210,52 +210,27 @@@ void __init acpi_boot_table_init(void
                        disable_acpi();
        }
  }
 -void __init acpi_gic_init(void)
 -{
 -      struct acpi_table_header *table;
 -      acpi_status status;
 -      acpi_size tbl_size;
 -      int err;
 -
 -      if (acpi_disabled)
 -              return;
 -
 -      status = acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
 -      if (ACPI_FAILURE(status)) {
 -              const char *msg = acpi_format_exception(status);
 -
 -              pr_err("Failed to get MADT table, %s\n", msg);
 -              return;
 -      }
 -
 -      err = gic_v2_acpi_init(table);
 -      if (err)
 -              pr_err("Failed to initialize GIC IRQ controller");
 -
 -      early_acpi_os_unmap_memory((char *)table, tbl_size);
 -}
 -
+ #ifdef CONFIG_ACPI_APEI
+ pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
+ {
+       /*
+        * According to "Table 8 Map: EFI memory types to AArch64 memory
+        * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
+        * mapped to a corresponding MAIR attribute encoding.
+        * The EFI memory attribute advises all possible capabilities
+        * of a memory region. We use the most efficient capability.
+        */
+       u64 attr;
+       attr = efi_mem_attributes(addr);
+       if (attr & EFI_MEMORY_WB)
+               return PAGE_KERNEL;
+       if (attr & EFI_MEMORY_WT)
+               return __pgprot(PROT_NORMAL_WT);
+       if (attr & EFI_MEMORY_WC)
+               return __pgprot(PROT_NORMAL_NC);
+       return __pgprot(PROT_DEVICE_nGnRnE);
+ }
+ #endif
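
arch_apei_get_mem_attribute() above picks the most efficient mapping the EFI memory map allows for a region. A hypothetical caller sketch, assuming a single page-sized mapping (ioremap_page_range() is the real API; the wrapper itself is illustrative only):

/* Map one page of a firmware-reported error record with the
 * attributes the region actually supports. */
static void __iomem *apei_map_page_sketch(unsigned long vaddr,
                                          phys_addr_t paddr)
{
        pgprot_t prot = arch_apei_get_mem_attribute(paddr);

        if (ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot))
                return NULL;
        return (void __iomem *)vaddr;
}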
Simple merge
index 504526fa81299eeb3a7c127e73a54c1a85a7383a,305f30dc9e633fe86947621e54d802744e59df52..369975c3a9956efc14ee2f2f393eea9b2f225bbe
  #include <linux/types.h>
  #include <asm/cpu.h>
  #include <asm/cpufeature.h>
 +#include <asm/cpu_ops.h>
  #include <asm/processor.h>
 +#include <asm/sysreg.h>
 +
 +unsigned long elf_hwcap __read_mostly;
 +EXPORT_SYMBOL_GPL(elf_hwcap);
 +
 +#ifdef CONFIG_COMPAT
 +#define COMPAT_ELF_HWCAP_DEFAULT      \
 +                              (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
 +                               COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
 +                               COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
 +                               COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
 +                               COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
 +                               COMPAT_HWCAP_LPAE)
 +unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
 +unsigned int compat_elf_hwcap2 __read_mostly;
 +#endif
 +
 +DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 +
 +#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 +      {                                               \
 +              .strict = STRICT,                       \
 +              .type = TYPE,                           \
 +              .shift = SHIFT,                         \
 +              .width = WIDTH,                         \
 +              .safe_val = SAFE_VAL,                   \
 +      }
 +
 +#define ARM64_FTR_END                                 \
 +      {                                               \
 +              .width = 0,                             \
 +      }
 +
 +static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* RAZ */
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
 +      /* Linux doesn't care about EL3 */
 +      ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
 +      /* Linux shouldn't care about secure memory */
 +      ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
 +      /*
 +       * Differing PARange is fine as long as all peripherals and memory are mapped
 +       * within the minimum PARange of all CPUs
 +       */
 +      ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_ctr[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),        /* RAO */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),  /* CWG */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),   /* ERG */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),   /* DminLine */
 +      /*
 +       * Linux can handle differing I-cache policies. Userspace JITs will
 +       * make use of *minLine
 +       */
 +      ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),     /* L1Ip */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),        /* RAZ */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),    /* IminLine */
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_id_mmfr0[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),        /* InnerShr */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),        /* FCSE */
 +      ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),        /* AuxReg */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),        /* TCM */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),        /* ShareLvl */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0), /* OuterShr */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_mvfr2[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),        /* RAZ */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),         /* FPMisc */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),         /* SIMDMisc */
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_dczid[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0),        /* RAZ */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),         /* DZP */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),    /* BS */
 +      ARM64_FTR_END,
 +};
 +
 +
 +static struct arm64_ftr_bits ftr_id_isar5[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0),        /* RAZ */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_id_mmfr4[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),        /* RAZ */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),         /* ac2 */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),         /* RAZ */
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_id_pfr0[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0),       /* RAZ */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),        /* State3 */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),         /* State2 */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),         /* State1 */
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),         /* State0 */
 +      ARM64_FTR_END,
 +};
 +
 +/*
 + * Common ftr bits for a 32bit register with all hidden, strict
 + * attributes, with 4bit feature fields and a default safe value of
 + * 0. Covers the following 32bit registers:
 + * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 + */
 +static struct arm64_ftr_bits ftr_generic_32bits[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_generic[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_generic32[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
 +      ARM64_FTR_END,
 +};
 +
 +static struct arm64_ftr_bits ftr_aa64raz[] = {
 +      ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
 +      ARM64_FTR_END,
 +};
 +
 +#define ARM64_FTR_REG(id, table)              \
 +      {                                       \
 +              .sys_id = id,                   \
 +              .name = #id,                    \
 +              .ftr_bits = &((table)[0]),      \
 +      }
 +
 +static struct arm64_ftr_reg arm64_ftr_regs[] = {
 +
 +      /* Op1 = 0, CRn = 0, CRm = 1 */
 +      ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
 +      ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
 +      ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
 +
 +      /* Op1 = 0, CRn = 0, CRm = 2 */
 +      ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
 +      ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
 +
 +      /* Op1 = 0, CRn = 0, CRm = 3 */
 +      ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
 +      ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
 +
 +      /* Op1 = 0, CRn = 0, CRm = 4 */
 +      ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
 +      ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),
 +
 +      /* Op1 = 0, CRn = 0, CRm = 5 */
 +      ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
 +      ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),
 +
 +      /* Op1 = 0, CRn = 0, CRm = 6 */
 +      ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
 +      ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),
 +
 +      /* Op1 = 0, CRn = 0, CRm = 7 */
 +      ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
 +      ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
 +
 +      /* Op1 = 3, CRn = 0, CRm = 0 */
 +      ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
 +      ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
 +
 +      /* Op1 = 3, CRn = 14, CRm = 0 */
 +      ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
 +};
 +
 +static int search_cmp_ftr_reg(const void *id, const void *regp)
 +{
 +      return (int)(unsigned long)id - (int)((const struct arm64_ftr_reg *)regp)->sys_id;
 +}
 +
 +/*
 + * get_arm64_ftr_reg - Lookup a feature register entry using its
 + * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 + * ascending order of sys_id, we use binary search to find a matching
 + * entry.
 + *
 + * returns - Upon success, the matching ftr_reg entry for id.
 + *         - NULL on failure. It is up to the caller to decide
 + *         the impact of a failure.
 + */
 +static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
 +{
 +      return bsearch((const void *)(unsigned long)sys_id,
 +                      arm64_ftr_regs,
 +                      ARRAY_SIZE(arm64_ftr_regs),
 +                      sizeof(arm64_ftr_regs[0]),
 +                      search_cmp_ftr_reg);
 +}
 +
 +static u64 arm64_ftr_set_value(struct arm64_ftr_bits *ftrp, s64 reg, s64 ftr_val)
 +{
 +      u64 mask = arm64_ftr_mask(ftrp);
 +
 +      reg &= ~mask;
 +      reg |= (ftr_val << ftrp->shift) & mask;
 +      return reg;
 +}
 +
 +static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur)
 +{
 +      s64 ret = 0;
 +
 +      switch (ftrp->type) {
 +      case FTR_EXACT:
 +              ret = ftrp->safe_val;
 +              break;
 +      case FTR_LOWER_SAFE:
 +              ret = new < cur ? new : cur;
 +              break;
 +      case FTR_HIGHER_SAFE:
 +              ret = new > cur ? new : cur;
 +              break;
 +      default:
 +              BUG();
 +      }
 +
 +      return ret;
 +}
 +
 +static int __init sort_cmp_ftr_regs(const void *a, const void *b)
 +{
 +      return ((const struct arm64_ftr_reg *)a)->sys_id -
 +               ((const struct arm64_ftr_reg *)b)->sys_id;
 +}
 +
 +static void __init swap_ftr_regs(void *a, void *b, int size)
 +{
 +      struct arm64_ftr_reg tmp = *(struct arm64_ftr_reg *)a;
 +      *(struct arm64_ftr_reg *)a = *(struct arm64_ftr_reg *)b;
 +      *(struct arm64_ftr_reg *)b = tmp;
 +}
 +
 +static void __init sort_ftr_regs(void)
 +{
 +      /* Keep the array sorted so that we can do the binary search */
 +      sort(arm64_ftr_regs,
 +              ARRAY_SIZE(arm64_ftr_regs),
 +              sizeof(arm64_ftr_regs[0]),
 +              sort_cmp_ftr_regs,
 +              swap_ftr_regs);
 +}
 +
 +/*
 + * Initialise the CPU feature register from Boot CPU values.
 + * Also initialises the strict_mask for the register.
 + */
 +static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
 +{
 +      u64 val = 0;
 +      u64 strict_mask = ~0x0ULL;
 +      struct arm64_ftr_bits *ftrp;
 +      struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
 +
 +      BUG_ON(!reg);
 +
 +      for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
 +              s64 ftr_new = arm64_ftr_value(ftrp, new);
 +
 +              val = arm64_ftr_set_value(ftrp, val, ftr_new);
 +              if (!ftrp->strict)
 +                      strict_mask &= ~arm64_ftr_mask(ftrp);
 +      }
 +      reg->sys_val = val;
 +      reg->strict_mask = strict_mask;
 +}
 +
 +void __init init_cpu_features(struct cpuinfo_arm64 *info)
 +{
 +      /* Before we start using the tables, make sure it is sorted */
 +      sort_ftr_regs();
 +
 +      init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
 +      init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
 +      init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
 +      init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
 +      init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
 +      init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
 +      init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
 +      init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
 +      init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
 +      init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
 +      init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
 +      init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
 +      init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
 +      init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
 +      init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
 +      init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
 +      init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
 +      init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
 +      init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
 +      init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
 +      init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
 +      init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
 +      init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
 +      init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
 +      init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
 +      init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
 +      init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
 +}
 +
 +static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
 +{
 +      struct arm64_ftr_bits *ftrp;
 +
 +      for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
 +              s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
 +              s64 ftr_new = arm64_ftr_value(ftrp, new);
 +
 +              if (ftr_cur == ftr_new)
 +                      continue;
 +              /* Find a safe value */
 +              ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
 +              reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
 +      }
 +
 +}
 +
 +static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
 +{
 +      struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
 +
 +      BUG_ON(!regp);
 +      update_cpu_ftr_reg(regp, val);
 +      if ((boot & regp->strict_mask) == (val & regp->strict_mask))
 +              return 0;
 +      pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
 +                      regp->name, boot, cpu, val);
 +      return 1;
 +}
 +
 +/*
 + * Update system wide CPU feature registers with the values from a
 + * non-boot CPU. Also performs SANITY checks to make sure that there
 + * aren't any insane variations from that of the boot CPU.
 + */
 +void update_cpu_features(int cpu,
 +                       struct cpuinfo_arm64 *info,
 +                       struct cpuinfo_arm64 *boot)
 +{
 +      int taint = 0;
 +
 +      /*
 +       * The kernel can handle differing I-cache policies, but otherwise
 +       * caches should look identical. Userspace JITs will make use of
 +       * *minLine.
 +       */
 +      taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
 +                                    info->reg_ctr, boot->reg_ctr);
 +
 +      /*
 +       * Userspace may perform DC ZVA instructions. Mismatched block sizes
 +       * could result in too much or too little memory being zeroed if a
 +       * process is preempted and migrated between CPUs.
 +       */
 +      taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
 +                                    info->reg_dczid, boot->reg_dczid);
 +
 +      /* If different, timekeeping will be broken (especially with KVM) */
 +      taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
 +                                    info->reg_cntfrq, boot->reg_cntfrq);
 +
 +      /*
 +       * The kernel uses self-hosted debug features and expects CPUs to
 +       * support identical debug features. We presently need CTX_CMPs, WRPs,
 +       * and BRPs to be identical.
 +       * ID_AA64DFR1 is currently RES0.
 +       */
 +      taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
 +                                    info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
 +      taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
 +                                    info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
 +      /*
 +       * Even in big.LITTLE, processors should be identical instruction-set
 +       * wise.
 +       */
 +      taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
 +                                    info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
 +      taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
 +                                    info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
 +
 +      /*
 +       * Differing PARange support is fine as long as all peripherals and
 +       * memory are mapped within the minimum PARange of all CPUs.
 +       * Linux should not care about secure memory.
 +       */
 +      taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
 +                                    info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
 +      taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
 +                                    info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
 +
 +      /*
 +       * EL3 is not our concern.
 +       * ID_AA64PFR1 is currently RES0.
 +       */
 +      taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
 +                                    info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
 +      taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
 +                                    info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
 +
 +      /*
 +       * If we have AArch32, we care about 32-bit features for compat. These
 +       * registers should be RES0 otherwise.
 +       */
 +      taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
 +                                      info->reg_id_dfr0, boot->reg_id_dfr0);
 +      taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
 +                                      info->reg_id_isar0, boot->reg_id_isar0);
 +      taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
 +                                      info->reg_id_isar1, boot->reg_id_isar1);
 +      taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
 +                                      info->reg_id_isar2, boot->reg_id_isar2);
 +      taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
 +                                      info->reg_id_isar3, boot->reg_id_isar3);
 +      taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
 +                                      info->reg_id_isar4, boot->reg_id_isar4);
 +      taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
 +                                      info->reg_id_isar5, boot->reg_id_isar5);
 +
 +      /*
 +       * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
 +       * ACTLR formats could differ across CPUs and therefore would have to
 +       * be trapped for virtualization anyway.
 +       */
 +      taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
 +                                      info->reg_id_mmfr0, boot->reg_id_mmfr0);
 +      taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
 +                                      info->reg_id_mmfr1, boot->reg_id_mmfr1);
 +      taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
 +                                      info->reg_id_mmfr2, boot->reg_id_mmfr2);
 +      taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
 +                                      info->reg_id_mmfr3, boot->reg_id_mmfr3);
 +      taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
 +                                      info->reg_id_pfr0, boot->reg_id_pfr0);
 +      taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
 +                                      info->reg_id_pfr1, boot->reg_id_pfr1);
 +      taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
 +                                      info->reg_mvfr0, boot->reg_mvfr0);
 +      taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
 +                                      info->reg_mvfr1, boot->reg_mvfr1);
 +      taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
 +                                      info->reg_mvfr2, boot->reg_mvfr2);
 +
 +      /*
 +       * Mismatched CPU features are a recipe for disaster. Don't even
 +       * pretend to support them.
 +       */
 +      WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
 +                      "Unsupported CPU feature variation.\n");
 +}
 +
 +u64 read_system_reg(u32 id)
 +{
 +      struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
 +
 +      /* We shouldn't get a request for an unsupported register */
 +      BUG_ON(!regp);
 +      return regp->sys_val;
 +}
  
+ #include <linux/irqchip/arm-gic-v3.h>
  static bool
  feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
  {
        return val >= entry->min_field_value;
  }
  
 -#define __ID_FEAT_CHK(reg)                                            \
 -static bool __maybe_unused                                            \
 -has_##reg##_feature(const struct arm64_cpu_capabilities *entry)               \
 -{                                                                     \
 -      u64 val;                                                        \
 -                                                                      \
 -      val = read_cpuid(reg##_el1);                                    \
 -      return feature_matches(val, entry);                             \
 -}
 +static bool
 +has_cpuid_feature(const struct arm64_cpu_capabilities *entry)
 +{
 +      u64 val;
  
 -__ID_FEAT_CHK(id_aa64pfr0);
 -__ID_FEAT_CHK(id_aa64mmfr1);
 -__ID_FEAT_CHK(id_aa64isar0);
 +      val = read_system_reg(entry->sys_reg);
 +      return feature_matches(val, entry);
 +}
  
 -      if (!has_id_aa64pfr0_feature(entry))
+ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
+ {
+       bool has_sre;
 -
++      if (!has_cpuid_feature(entry))
+               return false;
 -                           entry->desc);
+       has_sre = gic_enable_sre();
+       if (!has_sre)
+               pr_warn_once("%s present but disabled by higher exception level\n",
++                              entry->desc);
+       return has_sre;
+ }
  static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
                .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
-               .matches = has_cpuid_feature,
+               .matches = has_useable_gicv3_cpuif,
 -              .field_pos = 24,
 +              .sys_reg = SYS_ID_AA64PFR0_EL1,
 +              .field_pos = ID_AA64PFR0_GIC_SHIFT,
                .min_field_value = 1,
        },
  #ifdef CONFIG_ARM64_PAN
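
A worked sketch of the sanitisation rules in arm64_ftr_safe_value() above, with assumed register values: for a 4-bit FTR_LOWER_SAFE field such as the CRC32 field of ID_AA64ISAR0_EL1, the system-wide value is the minimum seen across CPUs, so a single CPU lacking the feature hides it everywhere; FTR_EXACT fields instead snap to the table's safe_val on any mismatch.

/* Assumed values, illustration only. */
s64 boot_crc32 = 1;     /* boot CPU implements the CRC32 instructions */
s64 late_crc32 = 0;     /* a late-onlined CPU does not                */
s64 safe = late_crc32 < boot_crc32 ? late_crc32 : boot_crc32; /* -> 0 */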
index a48d1f477b2e8c4e3852cd9677c766fcf243b7c2,61eb1d17586a859a1fcc1d0d0fd653028d736b11..de46b50f4cdf952087e77d473314e75728efeec0
@@@ -48,17 -48,9 +48,8 @@@ static struct mm_struct efi_mm = 
        .mmap_sem               = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
        .page_table_lock        = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
        .mmlist                 = LIST_HEAD_INIT(efi_mm.mmlist),
 -      INIT_MM_CONTEXT(efi_mm)
  };
  
- static int uefi_debug __initdata;
- static int __init uefi_debug_setup(char *str)
- {
-       uefi_debug = 1;
-       return 0;
- }
- early_param("uefi_debug", uefi_debug_setup);
  static int __init is_normal_ram(efi_memory_desc_t *md)
  {
        if (md->attribute & EFI_MEMORY_WB)
Simple merge
index 6a7d5cd772e6b73929fc33099d2c9193b1c0cb60,ff5292c6277c4764734a1a1769af06347ccb8c4b..c9d1f34daab152028d8bcd795504ac4ed2919cc8
@@@ -32,10 -34,9 +35,11 @@@ config KV
        select KVM_VFIO
        select HAVE_KVM_EVENTFD
        select HAVE_KVM_IRQFD
+       select KVM_ARM_VGIC_V3
        ---help---
          Support hosting virtualized guest machines.
 +        We don't support KVM with 16K page tables yet, due to the multiple
 +        levels of fake page tables.
  
          If unsure, say N.
  
Simple merge
Simple merge
Simple merge
Simple merge
index a8009c77918a7340193d6991b5a1db0dbb3532e6,2dfaa72260b41fcb3c9ef528ceffa9840cfc691d..4086abca0b32345c92207fa75468cc98ebe97da4
@@@ -165,11 -171,9 +171,11 @@@ asinstr += $(call as-instr,pshufb %xmm0
  asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
  avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
  avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 +sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)
 +sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1)
  
- KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
- KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
 -KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
 -KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
++KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
++KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
  
  LDFLAGS := -m elf_$(UTS_MACHINE)
  
index c934197fe84ae05b07a2e8a1196a6d74f581807b,00212c32d4db289a2fb2937241a39be917738889..dd14616b773970d13c2886f255c0f76b4eb58450
@@@ -110,62 -118,10 +110,62 @@@ static struct shash_alg sha1_ssse3_alg 
        }
  };
  
 +static int register_sha1_ssse3(void)
 +{
 +      if (boot_cpu_has(X86_FEATURE_SSSE3))
 +              return crypto_register_shash(&sha1_ssse3_alg);
 +      return 0;
 +}
 +
 +static void unregister_sha1_ssse3(void)
 +{
 +      if (boot_cpu_has(X86_FEATURE_SSSE3))
 +              crypto_unregister_shash(&sha1_ssse3_alg);
 +}
 +
  #ifdef CONFIG_AS_AVX
 -static bool __init avx_usable(void)
 +asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
 +                                 unsigned int rounds);
 +
 +static int sha1_avx_update(struct shash_desc *desc, const u8 *data,
 +                           unsigned int len)
 +{
 +      return sha1_update(desc, data, len,
 +                      (sha1_transform_fn *) sha1_transform_avx);
 +}
 +
 +static int sha1_avx_finup(struct shash_desc *desc, const u8 *data,
 +                            unsigned int len, u8 *out)
 +{
 +      return sha1_finup(desc, data, len, out,
 +                      (sha1_transform_fn *) sha1_transform_avx);
 +}
 +
 +static int sha1_avx_final(struct shash_desc *desc, u8 *out)
 +{
 +      return sha1_avx_finup(desc, NULL, 0, out);
 +}
 +
 +static struct shash_alg sha1_avx_alg = {
 +      .digestsize     =       SHA1_DIGEST_SIZE,
 +      .init           =       sha1_base_init,
 +      .update         =       sha1_avx_update,
 +      .final          =       sha1_avx_final,
 +      .finup          =       sha1_avx_finup,
 +      .descsize       =       sizeof(struct sha1_state),
 +      .base           =       {
 +              .cra_name       =       "sha1",
 +              .cra_driver_name =      "sha1-avx",
 +              .cra_priority   =       160,
 +              .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
 +              .cra_blocksize  =       SHA1_BLOCK_SIZE,
 +              .cra_module     =       THIS_MODULE,
 +      }
 +};
 +
 +static bool avx_usable(void)
  {
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
                if (cpu_has_avx)
                        pr_info("AVX detected but unusable.\n");
                return false;
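
A sketch of how these helpers might compose at module init (the ordering and error handling are assumed; the real init path also covers the AVX2 and SHA-NI variants):

/* Register the baseline SSSE3 algorithm, then the AVX one only when
 * avx_usable() says the ISA can actually be used. */
static int __init sha1_glue_init_sketch(void)
{
        int err = register_sha1_ssse3();

        if (err)
                return err;
        if (avx_usable()) {
                err = crypto_register_shash(&sha1_avx_alg);
                if (err)
                        unregister_sha1_ssse3();
        }
        return err;
}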
index 863e2f6aad135e1bf436d0374abce8f1d2eb3a5e,0e0e85aea63418fefb54e2d377c86bcebdf1c795..5f4d6086dc5913be7d680882ab94d2286ad3e230
@@@ -130,77 -127,10 +130,77 @@@ static struct shash_alg sha256_ssse3_al
        }
  } };
  
 +static int register_sha256_ssse3(void)
 +{
 +      if (boot_cpu_has(X86_FEATURE_SSSE3))
 +              return crypto_register_shashes(sha256_ssse3_algs,
 +                              ARRAY_SIZE(sha256_ssse3_algs));
 +      return 0;
 +}
 +
 +static void unregister_sha256_ssse3(void)
 +{
 +      if (boot_cpu_has(X86_FEATURE_SSSE3))
 +              crypto_unregister_shashes(sha256_ssse3_algs,
 +                              ARRAY_SIZE(sha256_ssse3_algs));
 +}
 +
  #ifdef CONFIG_AS_AVX
 -static bool __init avx_usable(void)
 +asmlinkage void sha256_transform_avx(u32 *digest, const char *data,
 +                                   u64 rounds);
 +
 +static int sha256_avx_update(struct shash_desc *desc, const u8 *data,
 +                       unsigned int len)
 +{
 +      return sha256_update(desc, data, len, sha256_transform_avx);
 +}
 +
 +static int sha256_avx_finup(struct shash_desc *desc, const u8 *data,
 +                    unsigned int len, u8 *out)
 +{
 +      return sha256_finup(desc, data, len, out, sha256_transform_avx);
 +}
 +
 +static int sha256_avx_final(struct shash_desc *desc, u8 *out)
 +{
 +      return sha256_avx_finup(desc, NULL, 0, out);
 +}
 +
 +static struct shash_alg sha256_avx_algs[] = { {
 +      .digestsize     =       SHA256_DIGEST_SIZE,
 +      .init           =       sha256_base_init,
 +      .update         =       sha256_avx_update,
 +      .final          =       sha256_avx_final,
 +      .finup          =       sha256_avx_finup,
 +      .descsize       =       sizeof(struct sha256_state),
 +      .base           =       {
 +              .cra_name       =       "sha256",
 +              .cra_driver_name =      "sha256-avx",
 +              .cra_priority   =       160,
 +              .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
 +              .cra_blocksize  =       SHA256_BLOCK_SIZE,
 +              .cra_module     =       THIS_MODULE,
 +      }
 +}, {
 +      .digestsize     =       SHA224_DIGEST_SIZE,
 +      .init           =       sha224_base_init,
 +      .update         =       sha256_avx_update,
 +      .final          =       sha256_avx_final,
 +      .finup          =       sha256_avx_finup,
 +      .descsize       =       sizeof(struct sha256_state),
 +      .base           =       {
 +              .cra_name       =       "sha224",
 +              .cra_driver_name =      "sha224-avx",
 +              .cra_priority   =       160,
 +              .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
 +              .cra_blocksize  =       SHA224_BLOCK_SIZE,
 +              .cra_module     =       THIS_MODULE,
 +      }
 +} };
 +
 +static bool avx_usable(void)
  {
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
                if (cpu_has_avx)
                        pr_info("AVX detected but unusable.\n");
                return false;
index 0dfe9a2ba64bc275432a01e717141694749a7439,0c8c38c101acda77c3bf67af0639a75451360ce8..34e5083d6f36540e967dc755384012ca35afd714
@@@ -130,27 -126,10 +130,27 @@@ static struct shash_alg sha512_ssse3_al
        }
  } };
  
 +static int register_sha512_ssse3(void)
 +{
 +      if (boot_cpu_has(X86_FEATURE_SSSE3))
 +              return crypto_register_shashes(sha512_ssse3_algs,
 +                      ARRAY_SIZE(sha512_ssse3_algs));
 +      return 0;
 +}
 +
 +static void unregister_sha512_ssse3(void)
 +{
 +      if (boot_cpu_has(X86_FEATURE_SSSE3))
 +              crypto_unregister_shashes(sha512_ssse3_algs,
 +                      ARRAY_SIZE(sha512_ssse3_algs));
 +}
 +
  #ifdef CONFIG_AS_AVX
 -static bool __init avx_usable(void)
 +asmlinkage void sha512_transform_avx(u64 *digest, const char *data,
 +                                   u64 rounds);
 +static bool avx_usable(void)
  {
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
                if (cpu_has_avx)
                        pr_info("AVX detected but unusable.\n");
                return false;
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index d4add30d1d4650c507da403aca880f6157ef6681,1d0e76855106cf946627dcc5c5aaa728a38c6bc8..515c823c1c95cee63b46c18bed35bb70260ae9a9
@@@ -1222,38 -1247,26 +1246,39 @@@ gic_acpi_parse_madt_cpu(struct acpi_sub
        return 0;
  }
  
 -static int __init
 -gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
 -                              const unsigned long end)
 +/* The things you have to do to just *count* something... */
 +static int __init acpi_dummy_func(struct acpi_subtable_header *header,
 +                                const unsigned long end)
  {
 -      struct acpi_madt_generic_distributor *dist;
 +      return 0;
 +}
  
 -      dist = (struct acpi_madt_generic_distributor *)header;
 +static bool __init acpi_gic_redist_is_present(void)
 +{
 +      return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
 +                                   acpi_dummy_func, 0) > 0;
 +}
  
 -      if (BAD_MADT_ENTRY(dist, end))
 -              return -EINVAL;
 +static bool __init gic_validate_dist(struct acpi_subtable_header *header,
 +                                   struct acpi_probe_entry *ape)
 +{
 +      struct acpi_madt_generic_distributor *dist;
 +      dist = (struct acpi_madt_generic_distributor *)header;
  
 -      dist_phy_base = dist->base_address;
 -      return 0;
 +      return (dist->version == ape->driver_data &&
 +              (dist->version != ACPI_MADT_GIC_VERSION_NONE ||
 +               !acpi_gic_redist_is_present()));
  }
  
 -int __init
 -gic_v2_acpi_init(struct acpi_table_header *table)
 +#define ACPI_GICV2_DIST_MEM_SIZE      (SZ_4K)
 +#define ACPI_GIC_CPU_IF_MEM_SIZE      (SZ_8K)
 +
 +static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
 +                                 const unsigned long end)
  {
 +      struct acpi_madt_generic_distributor *dist;
        void __iomem *cpu_base, *dist_base;
+       struct fwnode_handle *domain_handle;
        int count;
  
        /* Collect CPU base addresses */
                static_key_slow_dec(&supports_deactivate);
  
        /*
-        * Initialize zero GIC instance (no multi-GIC support). Also, set GIC
-        * as default IRQ domain to allow for GSI registration and GSI to IRQ
-        * number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
+        * Initialize GIC instance zero (no multi-GIC support).
         */
-       __gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
-       irq_set_default_host(gic_data[0].domain);
+       domain_handle = irq_domain_alloc_fwnode(dist_base);
+       if (!domain_handle) {
+               pr_err("Unable to allocate domain handle\n");
+               iounmap(cpu_base);
+               iounmap(dist_base);
+               return -ENOMEM;
+       }
+       __gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle);
  
-       acpi_irq_model = ACPI_IRQ_MODEL_GIC;
+       acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
        return 0;
  }
 +IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
 +                   gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
 +                   gic_v2_acpi_init);
 +IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
 +                   gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
 +                   gic_v2_acpi_init);
  #endif
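
The acpi_dummy_func() trick above generalizes: acpi_table_parse_madt() returns the number of subtable entries its callback accepted, so a callback that accepts everything turns the parser into a counter. A sketch, with an entry type chosen purely for illustration:

/* Count the GICC (CPU interface) entries in the MADT. */
static int __init count_madt_gicc_sketch(void)
{
        return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
                                     acpi_dummy_func, 0);
}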
Simple merge
Simple merge
Simple merge
index 0271b69d67afcb2c79abbac1a46eb94eedc6919a,f14a970b61fa59bf00b6e97474da3ec024ba315e..f53b8e85f137902f6e8b0667ae9f0539c06537a1
@@@ -1624,19 -1620,50 +1624,52 @@@ static void pci_init_capabilities(struc
  
        /* Enable ACS P2P upstream forwarding */
        pci_enable_acs(dev);
 +
 +      pci_cleanup_aer_error_status_regs(dev);
  }
  
+ /*
+  * This is the equivalent of pci_host_bridge_msi_domain that acts on
+  * devices. Firmware interfaces that can select the MSI domain on a
+  * per-device basis should be called from here.
+  */
+ static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
+ {
+       struct irq_domain *d;
+       /*
+        * If a domain has been set through the pcibios_add_device
+        * callback, then this is the one (platform code knows best).
+        */
+       d = dev_get_msi_domain(&dev->dev);
+       if (d)
+               return d;
+       /*
+        * Let's see if we have a firmware interface able to provide
+        * the domain.
+        */
+       d = pci_msi_get_device_domain(dev);
+       if (d)
+               return d;
+       return NULL;
+ }
  static void pci_set_msi_domain(struct pci_dev *dev)
  {
+       struct irq_domain *d;
        /*
-        * If no domain has been set through the pcibios_add_device
-        * callback, inherit the default from the bus device.
+        * If the platform or firmware interfaces cannot supply a
+        * device-specific MSI domain, then inherit the default domain
+        * from the host bridge itself.
         */
-       if (!dev_get_msi_domain(&dev->dev))
-               dev_set_msi_domain(&dev->dev,
-                                  dev_get_msi_domain(&dev->bus->dev));
+       d = pci_dev_msi_domain(dev);
+       if (!d)
+               d = dev_get_msi_domain(&dev->bus->dev);
+       dev_set_msi_domain(&dev->dev, d);
  }
  
  void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
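
Per the comment in pci_dev_msi_domain() above, a domain set from the pcibios_add_device() callback wins over both the firmware lookup and the host-bridge fallback. A hypothetical platform hook sketch (platform_lookup_msi_domain() is an assumed helper, not a real API):

int pcibios_add_device(struct pci_dev *dev)
{
        struct irq_domain *d = platform_lookup_msi_domain(dev);

        if (d)
                dev_set_msi_domain(&dev->dev, d);
        return 0;
}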
Simple merge
index b08d6ba5c1e6c9a5e7e31d7ed966bffa20e517a1,37ec668546ab21897393bcebc1ba548796db7994..8516717427906948a215c56fad35ab462a8cbfda
@@@ -16,8 -16,8 +16,9 @@@ enum fwnode_type 
        FWNODE_INVALID = 0,
        FWNODE_OF,
        FWNODE_ACPI,
 +      FWNODE_ACPI_DATA,
        FWNODE_PDATA,
+       FWNODE_IRQCHIP,
  };
  
  struct fwnode_handle {
Simple merge
index 580818d90475a267aa36d1b757e1e1328bf2f0fb,65d969246a4d02e1ee8854451c923d5003941d31..79e1d97bcec3031cd17764645f3aa30eab2fe349
@@@ -46,7 -46,11 +46,12 @@@ extern int of_irq_get(struct device_nod
  extern int of_irq_get_byname(struct device_node *dev, const char *name);
  extern int of_irq_to_resource_table(struct device_node *dev,
                struct resource *res, int nr_irqs);
 +extern void of_msi_configure(struct device *dev, struct device_node *np);
+ extern struct irq_domain *of_msi_get_domain(struct device *dev,
+                                           struct device_node *np,
+                                           enum irq_domain_bus_token token);
+ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
+                                                      u32 rid);
  #else
  static inline int of_irq_count(struct device_node *dev)
  {
@@@ -65,25 -69,47 +70,43 @@@ static inline int of_irq_to_resource_ta
  {
        return 0;
  }
 +static inline void of_msi_configure(struct device *dev, struct device_node *np)
 +{
 +}
+ static inline struct irq_domain *of_msi_get_domain(struct device *dev,
+                                                  struct device_node *np,
+                                                  enum irq_domain_bus_token token)
+ {
+       return NULL;
+ }
+ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
+                                                             u32 rid)
+ {
+       return NULL;
+ }
  #endif
  
 -#if defined(CONFIG_OF)
 +#if defined(CONFIG_OF_IRQ) || defined(CONFIG_SPARC)
  /*
   * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
   * implements it differently.  However, the prototype is the same for all,
   * so declare it here regardless of the CONFIG_OF_IRQ setting.
   */
  extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
 -extern struct device_node *of_irq_find_parent(struct device_node *child);
 -extern void of_msi_configure(struct device *dev, struct device_node *np);
+ u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
  
 -#else /* !CONFIG_OF */
 +#else /* !CONFIG_OF && !CONFIG_SPARC */
  static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
                                                int index)
  {
        return 0;
  }
 -static inline void *of_irq_find_parent(struct device_node *child)
 -{
 -      return NULL;
 -}
 -
+ static inline u32 of_msi_map_rid(struct device *dev,
+                                struct device_node *msi_np, u32 rid_in)
+ {
+       return rid_in;
+ }
  #endif /* !CONFIG_OF */
  
  #endif /* __OF_IRQ_H */
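
A hedged usage sketch for of_msi_map_rid(): a PCI requester ID is conventionally bus << 8 | devfn, and the function translates it through the device tree "msi-map" property (the !CONFIG_OF stub above simply passes the RID through unchanged). msi_np is assumed to be the MSI controller's node.

static u32 pci_rid_to_msi_rid_sketch(struct pci_dev *pdev,
                                     struct device_node *msi_np)
{
        u32 rid = (pdev->bus->number << 8) | pdev->devfn;

        return of_msi_map_rid(&pdev->dev, msi_np, rid);
}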
Simple merge
Simple merge
Simple merge
Simple merge