#ifndef _LINUX_MM_TYPES_TASK_H
#define _LINUX_MM_TYPES_TASK_H
/*
 * Here are the definitions of the MM data types that are embedded in 'struct task_struct'.
 *
 * (These are defined separately to decouple sched.h from mm_types.h as much as possible.)
 */
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
/*
 * Use split page-table locks (one lock per page-table page instead of a
 * single mm-wide lock) once the system can have at least
 * CONFIG_SPLIT_PTLOCK_CPUS CPUs; splitting the PMD-level lock additionally
 * requires architecture support.
 */
#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
/* Allocate the ptlock separately when it no longer fits inside a long. */
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
 * The per task VMA cache array:
 */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
31 struct vm_area_struct *vmas[VMACACHE_SIZE];
35 MM_FILEPAGES, /* Resident file mapping pages */
36 MM_ANONPAGES, /* Resident anonymous pages */
37 MM_SWAPENTS, /* Anonymous swap entries */
38 MM_SHMEMPAGES, /* Resident shared memory pages */
#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */
52 atomic_long_t count[NR_MM_COUNTERS];
57 #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
	/*
	 * Each bit set is a CPU that potentially has a TLB entry for one of
	 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
	 */
	struct cpumask cpumask;

	/* True if any bit in cpumask is set */
	bool flush_required;

	/*
	 * If true then the PTE was dirty when unmapped. The entry must be
	 * flushed before IO is initiated or a stale TLB entry potentially
	 * allows an update without redirtying the page.
	 */
	bool writable;
#endif
};
#endif /* _LINUX_MM_TYPES_TASK_H */