mm, kasan: stackdepot implementation. Enable stackdepot for SLAB
mm/kasan/kasan.c
index 1ad20ade8c91328d2f47e8c116f9aca6bc2fb57f..acb3b6c4dd89e5c604a1abac15484abb8e82ee01 100644 (file)
@@ -17,7 +17,9 @@
 #define DISABLE_BRANCH_PROFILING
 
 #include <linux/export.h>
+#include <linux/interrupt.h>
 #include <linux/init.h>
+#include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/kmemleak.h>
 #include <linux/linkage.h>
@@ -32,7 +34,6 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
-#include <linux/kasan.h>
 
 #include "kasan.h"
 #include "../slab.h"
@@ -334,6 +335,59 @@ void kasan_free_pages(struct page *page, unsigned int order)
                                KASAN_FREE_PAGE);
 }
 
+#ifdef CONFIG_SLAB
+/*
+ * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
+ * For larger allocations larger redzones are used.
+ */
+static size_t optimal_redzone(size_t object_size)
+{
+       int rz =
+               object_size <= 64        - 16   ? 16 :
+               object_size <= 128       - 32   ? 32 :
+               object_size <= 512       - 64   ? 64 :
+               object_size <= 4096      - 128  ? 128 :
+               object_size <= (1 << 14) - 256  ? 256 :
+               object_size <= (1 << 15) - 512  ? 512 :
+               object_size <= (1 << 16) - 1024 ? 1024 : 2048;
+       return rz;
+}
+
+void kasan_cache_create(struct kmem_cache *cache, size_t *size,
+                       unsigned long *flags)
+{
+       int redzone_adjust;
+       /* Make sure the adjusted size is still less than
+        * KMALLOC_MAX_CACHE_SIZE.
+        * TODO: this check is only useful for SLAB, but not SLUB. We'll need
+        * to skip it for SLUB when it starts using kasan_cache_create().
+        */
+       if (*size > KMALLOC_MAX_CACHE_SIZE -
+           sizeof(struct kasan_alloc_meta) -
+           sizeof(struct kasan_free_meta))
+               return;
+       *flags |= SLAB_KASAN;
+       /* Add alloc meta. */
+       cache->kasan_info.alloc_meta_offset = *size;
+       *size += sizeof(struct kasan_alloc_meta);
+
+       /* Add free meta. */
+       if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
+           cache->object_size < sizeof(struct kasan_free_meta)) {
+               cache->kasan_info.free_meta_offset = *size;
+               *size += sizeof(struct kasan_free_meta);
+       }
+       redzone_adjust = optimal_redzone(cache->object_size) -
+               (*size - cache->object_size);
+       if (redzone_adjust > 0)
+               *size += redzone_adjust;
+       *size = min(KMALLOC_MAX_CACHE_SIZE,
+                   max(*size,
+                       cache->object_size +
+                       optimal_redzone(cache->object_size)));
+}
+#endif
+
 void kasan_poison_slab(struct page *page)
 {
        kasan_poison_shadow(page_address(page),
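
For orientation, here is a small userspace sketch of the size arithmetic that kasan_cache_create() above ends up doing for a hypothetical 96-byte cache. The 16-byte metadata sizes are illustrative assumptions; the real struct kasan_alloc_meta / kasan_free_meta sizes are only bounded by the BUILD_BUG_ON(... > 32) checks later in this patch.

#include <stdio.h>
#include <stddef.h>

/* Same size ladder as optimal_redzone() in the hunk above. */
static size_t optimal_redzone(size_t object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

int main(void)
{
	/* Illustrative metadata sizes (assumption, not from the patch). */
	const size_t alloc_meta = 16, free_meta = 16;
	size_t object_size = 96;	/* hypothetical cache */
	size_t size = object_size;
	long adjust;

	size += alloc_meta;	/* alloc metadata is always appended */
	size += free_meta;	/* free metadata: only for RCU/ctor/small
				 * objects; assumed present here */

	adjust = (long)optimal_redzone(object_size) -
		 (long)(size - object_size);
	if (adjust > 0)
		size += adjust;

	/* Prints: object 96 -> padded size 128 (redzone 32) */
	printf("object %zu -> padded size %zu (redzone %zu)\n",
	       object_size, size, optimal_redzone(object_size));
	return 0;
}
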
@@ -351,11 +405,81 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object)
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
+#ifdef CONFIG_SLAB
+       if (cache->flags & SLAB_KASAN) {
+               struct kasan_alloc_meta *alloc_info =
+                       get_alloc_info(cache, object);
+               alloc_info->state = KASAN_STATE_INIT;
+       }
+#endif
 }
 
-void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+#ifdef CONFIG_SLAB
+static inline int in_irqentry_text(unsigned long ptr)
 {
-       kasan_kmalloc(cache, object, cache->object_size);
+       return (ptr >= (unsigned long)&__irqentry_text_start &&
+               ptr < (unsigned long)&__irqentry_text_end) ||
+               (ptr >= (unsigned long)&__softirqentry_text_start &&
+                ptr < (unsigned long)&__softirqentry_text_end);
+}
+
+static inline void filter_irq_stacks(struct stack_trace *trace)
+{
+       int i;
+
+       if (!trace->nr_entries)
+               return;
+       for (i = 0; i < trace->nr_entries; i++)
+               if (in_irqentry_text(trace->entries[i])) {
+                       /* Include the irqentry function into the stack. */
+                       trace->nr_entries = i + 1;
+                       break;
+               }
+}
+
+static inline depot_stack_handle_t save_stack(gfp_t flags)
+{
+       unsigned long entries[KASAN_STACK_DEPTH];
+       struct stack_trace trace = {
+               .nr_entries = 0,
+               .entries = entries,
+               .max_entries = KASAN_STACK_DEPTH,
+               .skip = 0
+       };
+
+       save_stack_trace(&trace);
+       filter_irq_stacks(&trace);
+       if (trace.nr_entries != 0 &&
+           trace.entries[trace.nr_entries-1] == ULONG_MAX)
+               trace.nr_entries--;
+
+       return depot_save_stack(&trace, flags);
+}
+
+static inline void set_track(struct kasan_track *track, gfp_t flags)
+{
+       track->pid = current->pid;
+       track->stack = save_stack(flags);
+}
+
+struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
+                                       const void *object)
+{
+       BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
+       return (void *)object + cache->kasan_info.alloc_meta_offset;
+}
+
+struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
+                                     const void *object)
+{
+       BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
+       return (void *)object + cache->kasan_info.free_meta_offset;
+}
+#endif
+
+void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
+{
+       kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
 void kasan_slab_free(struct kmem_cache *cache, void *object)
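
The filtering above matters because stack traces are collected innermost frame first: any frames that come after the irq entry frame belong to whichever task happened to be interrupted, so keeping them would make otherwise-identical interrupt-time allocation stacks look unique and bloat the stack depot. A minimal userspace illustration of the truncation, with made-up addresses standing in for the __irqentry_text_start/__irqentry_text_end section bounds:

#include <stdio.h>

/* Made-up stand-ins for the kernel's irq-entry text section bounds. */
static const unsigned long irqentry_start = 0x1000, irqentry_end = 0x2000;

static int in_irqentry_text(unsigned long ptr)
{
	return ptr >= irqentry_start && ptr < irqentry_end;
}

int main(void)
{
	/* Fake trace, innermost first: allocation site and handler in irq
	 * context, the irq entry frame, then the interrupted task's frames. */
	unsigned long entries[] = { 0x5000, 0x5100, 0x1080, 0x9100, 0x9200 };
	unsigned int nr_entries = 5, i;

	for (i = 0; i < nr_entries; i++)
		if (in_irqentry_text(entries[i])) {
			/* Keep the irq entry frame itself, drop the rest. */
			nr_entries = i + 1;
			break;
		}

	printf("kept %u of 5 frames\n", nr_entries);	/* kept 3 of 5 frames */
	return 0;
}
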
@@ -367,10 +491,22 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return;
 
+#ifdef CONFIG_SLAB
+       if (cache->flags & SLAB_KASAN) {
+               struct kasan_free_meta *free_info =
+                       get_free_info(cache, object);
+               struct kasan_alloc_meta *alloc_info =
+                       get_alloc_info(cache, object);
+               alloc_info->state = KASAN_STATE_FREE;
+               set_track(&free_info->track, GFP_NOWAIT);
+       }
+#endif
+
        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
 }
 
-void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
+                  gfp_t flags)
 {
        unsigned long redzone_start;
        unsigned long redzone_end;
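
set_track() and the two accessors above imply a per-object metadata layout that is declared in mm/kasan/kasan.h rather than in this file. As a rough sketch only, reconstructed from how this file uses the fields (the typedefs merely stand in for the kernel's own so the snippet is self-contained):

typedef unsigned int u32;		/* stand-in for the kernel typedef */
typedef u32 depot_stack_handle_t;	/* stand-in for <linux/stackdepot.h> */

struct kasan_track {
	u32 pid;			/* current->pid at alloc/free time */
	depot_stack_handle_t stack;	/* handle from depot_save_stack()  */
};

struct kasan_alloc_meta {		/* lives at kasan_info.alloc_meta_offset */
	struct kasan_track track;
	u32 state;			/* KASAN_STATE_INIT/ALLOC/FREE */
	u32 alloc_size;
};

struct kasan_free_meta {		/* lives at kasan_info.free_meta_offset */
	struct kasan_track track;
};
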
@@ -386,10 +522,20 @@ void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);
+#ifdef CONFIG_SLAB
+       if (cache->flags & SLAB_KASAN) {
+               struct kasan_alloc_meta *alloc_info =
+                       get_alloc_info(cache, object);
+
+               alloc_info->state = KASAN_STATE_ALLOC;
+               alloc_info->alloc_size = size;
+               set_track(&alloc_info->track, flags);
+       }
+#endif
 }
 EXPORT_SYMBOL(kasan_kmalloc);
 
-void kasan_kmalloc_large(const void *ptr, size_t size)
+void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
 {
        struct page *page;
        unsigned long redzone_start;
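
A worked example of the (un)poisoning that kasan_kmalloc() performs, assuming KASAN_SHADOW_SCALE_SIZE is 8 and that redzone_start/redzone_end are rounded up from object + size and object + cache->object_size in the unchanged part of the function:

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SIZE 8	/* one shadow byte covers 8 bytes */
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned long object = 0x1000;		/* made-up object address */
	unsigned long size = 100;		/* requested kmalloc size */
	unsigned long object_size = 128;	/* cache->object_size     */

	unsigned long redzone_start = round_up(object + size,
					       KASAN_SHADOW_SCALE_SIZE);
	unsigned long redzone_end = round_up(object + object_size,
					     KASAN_SHADOW_SCALE_SIZE);

	/* [0x1000, 0x1064) is unpoisoned; [0x1068, 0x1080) becomes
	 * KASAN_KMALLOC_REDZONE.  The four bytes in between share a shadow
	 * granule with live data, so their shadow byte records that only
	 * the first 4 of its 8 bytes are accessible. */
	printf("redzone: 0x%lx..0x%lx\n", redzone_start, redzone_end);
	return 0;
}
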
@@ -408,7 +554,7 @@ void kasan_kmalloc_large(const void *ptr, size_t size)
                KASAN_PAGE_REDZONE);
 }
 
-void kasan_krealloc(const void *object, size_t size)
+void kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
        struct page *page;
 
@@ -418,9 +564,9 @@ void kasan_krealloc(const void *object, size_t size)
        page = virt_to_head_page(object);
 
        if (unlikely(!PageSlab(page)))
-               kasan_kmalloc_large(object, size);
+               kasan_kmalloc_large(object, size, flags);
        else
-               kasan_kmalloc(page->slab_cache, object, size);
+               kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
 void kasan_kfree(void *ptr)
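
Taken together, the hooks changed by this patch drive a simple per-object life cycle, recording a (pid, stack) track on allocation and on free so the report code can later show both. A sketch of the states involved; the authoritative enum lives in mm/kasan/kasan.h:

enum kasan_state {
	KASAN_STATE_INIT,	/* set by kasan_poison_object_data()          */
	KASAN_STATE_ALLOC,	/* set by kasan_kmalloc(), alloc track saved  */
	KASAN_STATE_FREE,	/* set by kasan_slab_free(), free track saved */
};
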