X-Git-Url: https://git.kernelconcepts.de/?a=blobdiff_plain;f=arch%2Farm64%2Finclude%2Fasm%2Fmmu.h;h=990124a67eebd4b10a19ae9509cfd5b6d9ac1712;hb=1b3f6228d202d0131a51cb7a7963f6d7635187ad;hp=030208767185bd56edce4b01e9d5aa91c08c3058;hpb=4184a8fc57a43faa2625988cc87031db77af9d98;p=karo-tx-linux.git

diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 030208767185..990124a67eeb 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -17,15 +17,16 @@
 #define __ASM_MMU_H
 
 typedef struct {
-	unsigned int id;
-	raw_spinlock_t id_lock;
-	void *vdso;
+	atomic64_t	id;
+	void		*vdso;
 } mm_context_t;
 
-#define INIT_MM_CONTEXT(name) \
-	.context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
-
-#define ASID(mm)	((mm)->context.id & 0xffff)
+/*
+ * This macro is only used by the TLBI code, which cannot race with an
+ * ASID change and therefore doesn't need to reload the counter using
+ * atomic64_read.
+ */
+#define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
 extern void paging_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
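
For context, below is a minimal sketch of the kind of TLBI caller the new comment above ASID() refers to, loosely modeled on arm64's flush_tlb_mm() in asm/tlbflush.h. It is not part of this diff; the function name example_flush_tlb_asid and the exact include list are illustrative assumptions.

#include <linux/mm_types.h>	/* struct mm_struct */
#include <asm/barrier.h>	/* dsb() */
#include <asm/mmu.h>		/* ASID() */

/*
 * Illustrative sketch only: invalidate every TLB entry tagged with this
 * mm's ASID. The plain (non-atomic64_read) access hidden inside ASID()
 * is acceptable here because, per the comment in mmu.h, this path cannot
 * race with an ASID change.
 */
static inline void example_flush_tlb_asid(struct mm_struct *mm)
{
	/* The TLBI ASIDE1IS operand carries the ASID in bits [63:48]. */
	unsigned long asid = (unsigned long)ASID(mm) << 48;

	dsb(ishst);					/* order prior page-table updates */
	asm("tlbi aside1is, %0" : : "r" (asid));	/* invalidate by ASID, inner shareable */
	dsb(ish);					/* wait for the invalidation to complete */
}

The 0xffff mask in ASID() matches that operand layout: only the low 16 bits of the 64-bit context.id are the hardware ASID, while the upper bits carry the allocator's generation count and must never reach the TLBI instruction.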