#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB: A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
        ALLOC_SLOWPATH,         /* Allocation by getting a new cpu slab */
        FREE_FASTPATH,          /* Free to cpu slab */
        FREE_SLOWPATH,          /* Freeing not to cpu slab */
        FREE_FROZEN,            /* Freeing to frozen slab */
        FREE_ADD_PARTIAL,       /* Freeing moves slab to partial list */
        FREE_REMOVE_PARTIAL,    /* Freeing removes last object */
        ALLOC_FROM_PARTIAL,     /* Cpu slab acquired from node partial list */
        ALLOC_SLAB,             /* Cpu slab acquired from page allocator */
        ALLOC_REFILL,           /* Refill cpu slab from slab freelist */
        ALLOC_NODE_MISMATCH,    /* Switching cpu slab */
        FREE_SLAB,              /* Slab freed to the page allocator */
        CPUSLAB_FLUSH,          /* Abandoning of the cpu slab */
        DEACTIVATE_FULL,        /* Cpu slab was full when deactivated */
        DEACTIVATE_EMPTY,       /* Cpu slab was empty when deactivated */
        DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
        DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
        DEACTIVATE_BYPASS,      /* Implicit deactivation */
        ORDER_FALLBACK,         /* Number of times fallback was necessary */
        CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
        CMPXCHG_DOUBLE_FAIL,    /* Number of times that cmpxchg double did not match */
        CPU_PARTIAL_ALLOC,      /* Used cpu partial on alloc */
        CPU_PARTIAL_FREE,       /* Refill cpu partial on free */
        CPU_PARTIAL_NODE,       /* Refill cpu partial from node partial */
        CPU_PARTIAL_DRAIN,      /* Drain cpu partial to node partial */
        NR_SLUB_STAT_ITEMS };

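/*
 * With CONFIG_SLUB_STATS these counters are exposed as files under
 * /sys/kernel/slab/<cache>/. A sketch of how mm/slub.c of this era
 * increments them; stat() is internal to slub.c and is shown here
 * only for illustration:
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 *
 * The raw (non-atomic) increment is deliberate: a rare lost update is
 * acceptable for statistics and avoids irq-disabling overhead.
 */
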
struct kmem_cache_cpu {
        void **freelist;        /* Pointer to next available object */
        unsigned long tid;      /* Globally unique transaction id */
        struct page *page;      /* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        struct page *partial;   /* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
        unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

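/*
 * The freelist/tid pair is the heart of the lockless fastpath: both
 * words are updated together with a double-word cmpxchg, and the
 * transaction id is bumped on every operation, so a freelist read
 * that raced with preemption or cpu migration cannot win the cmpxchg.
 * A condensed sketch of the allocation fastpath in mm/slub.c (node
 * checks, prefetch and statistics omitted for illustration):
 *
 *	object = c->freelist;
 *	if (unlikely(!object))
 *		object = __slab_alloc(s, gfpflags, node, addr, c);
 *	else if (unlikely(!this_cpu_cmpxchg_double(
 *			s->cpu_slab->freelist, s->cpu_slab->tid,
 *			object, tid,
 *			get_freepointer_safe(s, object), next_tid(tid))))
 *		goto redo;
 */
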
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)          ((c)->partial)

#define slub_set_percpu_partial(c, p)           \
({                                              \
        slub_percpu_partial(c) = (p)->next;     \
})

#define slub_percpu_partial_read_once(c)     READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)                  NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)        NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */

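/*
 * Usage sketch: the allocation slow path refills the cpu slab by
 * popping the head of the per cpu partial list, which is how the
 * accessors above compose (condensed from ___slab_alloc() in
 * mm/slub.c; slub_set_percpu_partial() advances c->partial to
 * page->next):
 *
 *	if (slub_percpu_partial(c)) {
 *		page = c->page = slub_percpu_partial(c);
 *		slub_set_percpu_partial(c, page);
 *		stat(s, CPU_PARTIAL_ALLOC);
 *		goto redo;
 *	}
 *
 * slub_percpu_partial_read_once() is for lockless peeks at another
 * cpu's list, where the pointer may change underneath the reader.
 */
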
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
        unsigned long x;
};

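/*
 * mm/slub.c packs both values into the one word: the page order in the
 * high bits and the object count in the low bits. A sketch of the
 * accessors, assuming the OO_SHIFT of 16 used there at the time:
 *
 *	#define OO_SHIFT	16
 *	#define OO_MASK		((1 << OO_SHIFT) - 1)
 *
 *	static inline int oo_order(struct kmem_cache_order_objects x)
 *	{
 *		return x.x >> OO_SHIFT;
 *	}
 *
 *	static inline int oo_objects(struct kmem_cache_order_objects x)
 *	{
 *		return x.x & OO_MASK;
 *	}
 */
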
/*
 * Slab cache management.
 */
struct kmem_cache {
        struct kmem_cache_cpu __percpu *cpu_slab;
        /* Used for retrieving partial slabs, etc. */
        unsigned long flags;
        unsigned long min_partial;
        int size;               /* The size of an object including metadata */
        int object_size;        /* The size of an object without metadata */
        int offset;             /* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
        int cpu_partial;        /* Number of per cpu partial objects to keep around */
#endif
        struct kmem_cache_order_objects oo;

        /* Allocation and freeing of slabs */
        struct kmem_cache_order_objects max;
        struct kmem_cache_order_objects min;
        gfp_t allocflags;       /* gfp flags to use on each alloc */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(void *);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
        int reserved;           /* Reserved bytes at the end of slabs */
        int red_left_pad;       /* Left redzone padding size */
        const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
#ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
        struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
        struct memcg_cache_params memcg_params;
        int max_attr_size; /* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
        struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_NUMA
        /*
         * Defragmentation by allocating from a remote node.
         */
        int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
        unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
        struct kasan_cache kasan_info;
#endif

        struct kmem_cache_node *node[MAX_NUMNODES];
};

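/*
 * The node array is consumed through small helpers in mm/slab.h; a
 * sketch of the two SLUB leans on (get_node() yields NULL for nodes
 * the cache keeps no structures on, which the iterator skips):
 *
 *	static inline struct kmem_cache_node *get_node(struct kmem_cache *s,
 *						       int node)
 *	{
 *		return s->node[node];
 *	}
 *
 *	#define for_each_kmem_cache_node(__s, __node, __n) \
 *		for (__node = 0; __node < nr_node_ids; __node++) \
 *			if ((__n = get_node(__s, __node)))
 */
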
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)             ((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)              \
({                                              \
        slub_cpu_partial(s) = (n);              \
})
#else
#define slub_cpu_partial(s)             (0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
                u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
                                void *x)
{
        void *object = x - (x - page_address(page)) % cache->size;
        void *last_object = page_address(page) +
                (page->objects - 1) * cache->size;
        void *result = (unlikely(object > last_object)) ? last_object : object;

        result = fixup_red_left(cache, result);
        return result;
}

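/*
 * Worked example with hypothetical numbers: for page_address(page) == B,
 * cache->size == 128 and a stray pointer x == B + 300, the modulo rounds
 * x down to object == B + 256, the start of the third object in the
 * slab; a pointer past the final object is clamped to last_object. With
 * SLAB_RED_ZONE active, fixup_red_left() then adds cache->red_left_pad
 * so the caller (e.g. KASAN) gets the usable start of the object rather
 * than its left red zone.
 */
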
#endif /* _LINUX_SLUB_DEF_H */