arch/arm64/include/asm/efi.h
#ifndef _ASM_EFI_H
#define _ASM_EFI_H

#include <asm/cpufeature.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/neon.h>
#include <asm/ptrace.h>
#include <asm/tlbflush.h>

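/*
 * efi_init() is called early from the arm64 setup_arch() path to pick up
 * the firmware-provided system table and memory map; with CONFIG_EFI
 * disabled, the empty definition below lets that call site compile away.
 */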
#ifdef CONFIG_EFI
extern void efi_init(void);
#else
#define efi_init()
#endif

int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);

#define arch_efi_call_virt_setup()				\
({								\
	kernel_neon_begin();					\
	efi_virtmap_load();					\
})

#define arch_efi_call_virt(p, f, args...)			\
({								\
	efi_##f##_t *__f;					\
	__f = p->f;						\
	__f(args);						\
})

#define arch_efi_call_virt_teardown()				\
({								\
	efi_virtmap_unload();					\
	kernel_neon_end();					\
})

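/*
 * Usage sketch: the generic EFI runtime wrappers are expected to bracket
 * each firmware call with the three hooks above, roughly as below (the
 * rt, tm and tc variables are illustrative and not defined in this header):
 *
 *	efi_status_t status;
 *
 *	arch_efi_call_virt_setup();
 *	status = arch_efi_call_virt(rt, get_time, tm, tc);
 *	arch_efi_call_virt_teardown();
 *
 * so that kernel-mode NEON and the EFI page tables are only live for the
 * duration of the call.
 */
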
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

/* arch specific definitions used by the stub code */

/*
 * AArch64 requires the DTB to be 8-byte aligned and placed within the first
 * 512MiB from the start of the kernel, and it may not cross a 2MiB boundary.
 * We set the alignment to 2MiB so we know it won't cross a 2MiB boundary.
 */
#define EFI_FDT_ALIGN	SZ_2M	/* used by allocate_new_fdt_and_exit_boot() */
#define MAX_FDT_OFFSET	SZ_512M
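/*
 * Worked example of the reasoning above: with a 2MiB-aligned base B, the
 * DTB occupies [B, B + size) with size <= 2MiB, which is contained in the
 * single 2MiB granule [B, B + 2MiB) and therefore cannot straddle a 2MiB
 * boundary (this assumes the stub never builds a DTB larger than 2MiB).
 */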

#define efi_call_early(f, ...)		sys_table_arg->boottime->f(__VA_ARGS__)
#define __efi_call_early(f, ...)	f(__VA_ARGS__)
#define efi_call_runtime(f, ...)	sys_table_arg->runtime->f(__VA_ARGS__)
#define efi_is_64bit()			(true)

#define efi_call_proto(protocol, f, instance, ...)			\
	((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
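/*
 * Expansion sketch: these helpers assume the stub convention of having a
 * sys_table_arg system table pointer in scope at the call site. With purely
 * illustrative names (foo_protocol, query_mode, gop and the arguments are
 * not real identifiers),
 *
 *	efi_call_proto(foo_protocol, query_mode, gop, mode, &size, &info)
 *
 * expands to
 *
 *	((foo_protocol_t *)gop)->query_mode(gop, mode, &size, &info)
 *
 * while efi_call_early(f, ...) and efi_call_runtime(f, ...) dispatch f
 * through sys_table_arg->boottime and sys_table_arg->runtime respectively.
 */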

#define alloc_screen_info(x...)		&screen_info
#define free_screen_info(x...)

static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
}

#define EFI_ALLOC_ALIGN		SZ_64K

/*
 * On ARM systems, virtually remapped UEFI runtime services are set up in two
 * distinct stages:
 * - The stub retrieves the final version of the memory map from UEFI, populates
 *   the virt_addr fields and calls the SetVirtualAddressMap() [SVAM] runtime
 *   service to communicate the new mapping to the firmware (note that the new
 *   mapping is not live at this time).
 * - During an early initcall(), the EFI system table is permanently remapped
 *   and the virtual remapping of the UEFI Runtime Services regions is loaded
 *   into a private set of page tables. If this all succeeds, the Runtime
 *   Services are enabled and the EFI_RUNTIME_SERVICES bit is set.
 */
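/*
 * A rough sketch of stage one as seen from the stub side (map_size,
 * desc_size, desc_ver and virtual_map are illustrative names, and the
 * virtual address policy is left out):
 *
 *	for each efi_memory_desc_t *md in the final memory map:
 *		if (md->attribute & EFI_MEMORY_RUNTIME)
 *			md->virt_addr = <VA chosen for this region>;
 *
 *	status = sys_table->runtime->set_virtual_address_map(map_size,
 *					desc_size, desc_ver, virtual_map);
 *
 * Only once the kernel proper has installed the matching page tables
 * (stage two, see efi_set_pgd() below) can calls be made through the new
 * mappings.
 */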

static inline void efi_set_pgd(struct mm_struct *mm)
{
	__switch_mm(mm);

	if (system_uses_ttbr0_pan()) {
		if (mm != current->active_mm) {
			/*
			 * Update the current thread's saved ttbr0 since it is
			 * restored as part of a return from exception. Set
			 * the hardware TTBR0_EL1 using cpu_switch_mm()
			 * directly to enable potential errata workarounds.
			 */
			update_saved_ttbr0(current, mm);
			cpu_switch_mm(mm->pgd, mm);
		} else {
			/*
			 * Defer the switch to the current thread's TTBR0_EL1
			 * until uaccess_enable(). Restore the current
			 * thread's saved ttbr0 corresponding to its active_mm
			 * (if different from init_mm).
			 */
			cpu_set_reserved_ttbr0();
			if (current->active_mm != &init_mm)
				update_saved_ttbr0(current, current->active_mm);
		}
	}
}

void efi_virtmap_load(void);
void efi_virtmap_unload(void);

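/*
 * Sketch of how the declarations above are expected to pair with
 * efi_set_pgd() in arch/arm64/kernel/efi.c (approximate, shown here for
 * context only):
 *
 *	void efi_virtmap_load(void)
 *	{
 *		preempt_disable();
 *		efi_set_pgd(&efi_mm);
 *	}
 *
 *	void efi_virtmap_unload(void)
 *	{
 *		efi_set_pgd(current->active_mm);
 *		preempt_enable();
 *	}
 */
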
#endif /* _ASM_EFI_H */