mm/kasan/quarantine.c
/*
 * KASAN quarantine.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */

#include <linux/gfp.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/shrinker.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include "../slab.h"
#include "kasan.h"

/* Data structure and operations for quarantine queues. */

/*
 * Each queue is a singly linked list, which also stores the total size of
 * objects inside of it.
 */
struct qlist_head {
	struct qlist_node *head;
	struct qlist_node *tail;
	size_t bytes;
};

#define QLIST_INIT { NULL, NULL, 0 }

static bool qlist_empty(struct qlist_head *q)
{
	return !q->head;
}

static void qlist_init(struct qlist_head *q)
{
	q->head = q->tail = NULL;
	q->bytes = 0;
}

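/* Enqueue @qlink at the tail of @q and account its size. */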
static void qlist_put(struct qlist_head *q, struct qlist_node *qlink,
		size_t size)
{
	if (unlikely(qlist_empty(q)))
		q->head = qlink;
	else
		q->tail->next = qlink;
	q->tail = qlink;
	qlink->next = NULL;
	q->bytes += size;
}

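/* Splice all entries from @from onto the tail of @to and reinitialize @from. */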
static void qlist_move_all(struct qlist_head *from, struct qlist_head *to)
{
	if (unlikely(qlist_empty(from)))
		return;

	if (qlist_empty(to)) {
		*to = *from;
		qlist_init(from);
		return;
	}

	to->tail->next = from->head;
	to->tail = from->tail;
	to->bytes += from->bytes;

	qlist_init(from);
}

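/*
 * A per-cpu queue is spilled into the global quarantine once it grows past
 * QUARANTINE_PERCPU_SIZE (1 MB). The global quarantine is split into
 * QUARANTINE_BATCHES batches so that quarantine_reduce() can detach and free
 * one bounded batch at a time instead of draining the whole queue.
 */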
#define QUARANTINE_PERCPU_SIZE (1 << 20)
#define QUARANTINE_BATCHES \
	(1024 > 4 * CONFIG_NR_CPUS ? 1024 : 4 * CONFIG_NR_CPUS)

/*
 * The object quarantine consists of per-cpu queues and a global queue,
 * guarded by quarantine_lock.
 */
static DEFINE_PER_CPU(struct qlist_head, cpu_quarantine);

/* Round-robin FIFO array of batches. */
static struct qlist_head global_quarantine[QUARANTINE_BATCHES];
static int quarantine_head;
static int quarantine_tail;
/* Total size of all objects in global_quarantine across all batches. */
static unsigned long quarantine_size;
static DEFINE_SPINLOCK(quarantine_lock);

/* Maximum size of the global queue. */
static unsigned long quarantine_max_size;

/*
 * Target size of a batch in global_quarantine.
 * Usually equal to QUARANTINE_PERCPU_SIZE unless we have too much RAM.
 */
static unsigned long quarantine_batch_size;

/*
 * The fraction of physical memory the quarantine is allowed to occupy.
 * Quarantine doesn't support memory shrinker with SLAB allocator, so we keep
 * the ratio low to avoid OOM.
 */
#define QUARANTINE_FRACTION 32

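/* Find the kmem_cache a quarantined object belongs to from its slab page. */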
static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
{
	return virt_to_head_page(qlink)->slab_cache;
}

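/*
 * The quarantine link is embedded in the object's free metadata; step back
 * from it to the beginning of the object itself.
 */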
static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
{
	struct kasan_free_meta *free_info =
		container_of(qlink, struct kasan_free_meta,
			     quarantine_link);

	return ((void *)free_info) - cache->kasan_info.free_meta_offset;
}

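/*
 * Actually free a quarantined object. For SLAB, ___cache_free() must be
 * called with interrupts disabled, hence the local_irq_save()/restore()
 * pair around it.
 */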
static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
{
	void *object = qlink_to_object(qlink, cache);
	unsigned long flags;

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_save(flags);

	___cache_free(cache, object, _THIS_IP_);

	if (IS_ENABLED(CONFIG_SLAB))
		local_irq_restore(flags);
}

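/*
 * Free every object on @q. If @cache is NULL the objects may belong to
 * different caches, so look up each object's cache individually.
 */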
static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
{
	struct qlist_node *qlink;

	if (unlikely(qlist_empty(q)))
		return;

	qlink = q->head;
	while (qlink) {
		struct kmem_cache *obj_cache =
			cache ? cache : qlink_to_cache(qlink);
		struct qlist_node *next = qlink->next;

		qlink_free(qlink, obj_cache);
		qlink = next;
	}
	qlist_init(q);
}

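/*
 * Called from the KASAN slab free hook: instead of being returned to the
 * allocator, the object is put into this CPU's quarantine queue. An
 * overflowing per-cpu queue is first detached onto a temporary list with
 * interrupts disabled and then spliced into the global quarantine under
 * quarantine_lock.
 */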
void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
{
	unsigned long flags;
	struct qlist_head *q;
	struct qlist_head temp = QLIST_INIT;

	local_irq_save(flags);

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_put(q, &info->quarantine_link, cache->size);
	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE))
		qlist_move_all(q, &temp);

	local_irq_restore(flags);

	if (unlikely(!qlist_empty(&temp))) {
		spin_lock_irqsave(&quarantine_lock, flags);
		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
		if (global_quarantine[quarantine_tail].bytes >=
				READ_ONCE(quarantine_batch_size)) {
			int new_tail;

			new_tail = quarantine_tail + 1;
			if (new_tail == QUARANTINE_BATCHES)
				new_tail = 0;
			/* Only advance the tail if the FIFO is not full. */
			if (new_tail != quarantine_head)
				quarantine_tail = new_tail;
		}
		spin_unlock_irqrestore(&quarantine_lock, flags);
	}
}

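/*
 * Called from allocation paths: when the quarantine outgrows its limit,
 * detach the oldest batch from the global queue and free its objects,
 * outside of quarantine_lock.
 */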
void quarantine_reduce(void)
{
	size_t total_size, new_quarantine_size, percpu_quarantines;
	unsigned long flags;
	struct qlist_head to_free = QLIST_INIT;

	if (likely(READ_ONCE(quarantine_size) <=
		   READ_ONCE(quarantine_max_size)))
		return;

	spin_lock_irqsave(&quarantine_lock, flags);

	/*
	 * Update quarantine size in case of hotplug. Allocate a fraction of
	 * the installed memory to quarantine minus per-cpu queue limits.
	 */
	total_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) /
		QUARANTINE_FRACTION;
	percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus();
	new_quarantine_size = (total_size < percpu_quarantines) ?
		0 : total_size - percpu_quarantines;
	WRITE_ONCE(quarantine_max_size, new_quarantine_size);
	/* Aim at consuming at most 1/2 of slots in quarantine. */
	WRITE_ONCE(quarantine_batch_size, max((size_t)QUARANTINE_PERCPU_SIZE,
		2 * total_size / QUARANTINE_BATCHES));

	if (likely(quarantine_size > quarantine_max_size)) {
		qlist_move_all(&global_quarantine[quarantine_head], &to_free);
		WRITE_ONCE(quarantine_size, quarantine_size - to_free.bytes);
		quarantine_head++;
		if (quarantine_head == QUARANTINE_BATCHES)
			quarantine_head = 0;
	}

	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, NULL);
}

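/* Move all objects belonging to @cache from @from to @to. */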
static void qlist_move_cache(struct qlist_head *from,
				   struct qlist_head *to,
				   struct kmem_cache *cache)
{
	struct qlist_node *curr;

	if (unlikely(qlist_empty(from)))
		return;

	curr = from->head;
	qlist_init(from);
	while (curr) {
		struct qlist_node *next = curr->next;
		struct kmem_cache *obj_cache = qlink_to_cache(curr);

		if (obj_cache == cache)
			qlist_put(to, curr, obj_cache->size);
		else
			qlist_put(from, curr, obj_cache->size);

		curr = next;
	}
}

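/*
 * Runs on each CPU via on_each_cpu(): flush objects belonging to the cache
 * being destroyed out of this CPU's quarantine queue.
 */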
static void per_cpu_remove_cache(void *arg)
{
	struct kmem_cache *cache = arg;
	struct qlist_head to_free = QLIST_INIT;
	struct qlist_head *q;

	q = this_cpu_ptr(&cpu_quarantine);
	qlist_move_cache(q, &to_free, cache);
	qlist_free_all(&to_free, cache);
}

/* Free all quarantined objects belonging to cache. */
void quarantine_remove_cache(struct kmem_cache *cache)
{
	unsigned long flags, i;
	struct qlist_head to_free = QLIST_INIT;

	on_each_cpu(per_cpu_remove_cache, cache, 1);

	spin_lock_irqsave(&quarantine_lock, flags);
	for (i = 0; i < QUARANTINE_BATCHES; i++)
		qlist_move_cache(&global_quarantine[i], &to_free, cache);
	spin_unlock_irqrestore(&quarantine_lock, flags);

	qlist_free_all(&to_free, cache);
}