diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 8d2fcdfeff7fdb319f58c838cd8b94a6cc59121e..3cda50c1e3942100bcd3f8715a9062fe1c6d7528 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -387,7 +387,7 @@ static void dump_object_info(struct kmemleak_object *object)
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%lx\n", object->flags);
-       pr_notice("  checksum = %d\n", object->checksum);
+       pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
        print_stack_trace(&trace, 4);
 }
@@ -989,6 +989,40 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 
+/**
+ * kmemleak_update_trace - update object allocation stack trace
+ * @ptr:       pointer to beginning of the object
+ *
+ * Override the object allocation stack trace for cases where the actual
+ * allocation place is not always useful.
+ */
+void __ref kmemleak_update_trace(const void *ptr)
+{
+       struct kmemleak_object *object;
+       unsigned long flags;
+
+       pr_debug("%s(0x%p)\n", __func__, ptr);
+
+       if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
+               return;
+
+       object = find_and_get_object((unsigned long)ptr, 1);
+       if (!object) {
+#ifdef DEBUG
+               kmemleak_warn("Updating stack trace for unknown object at %p\n",
+                             ptr);
+#endif
+               return;
+       }
+
+       spin_lock_irqsave(&object->lock, flags);
+       object->trace_len = __save_stack_trace(object->trace);
+       spin_unlock_irqrestore(&object->lock, flags);
+
+       put_object(object);
+}
+EXPORT_SYMBOL(kmemleak_update_trace);
+
 /**
  * kmemleak_not_leak - mark an allocated object as false positive
  * @ptr:       pointer to beginning of the object
@@ -1300,7 +1334,7 @@ static void kmemleak_scan(void)
        /*
         * Struct page scanning for each node.
         */
-       lock_memory_hotplug();
+       get_online_mems();
        for_each_online_node(i) {
                unsigned long start_pfn = node_start_pfn(i);
                unsigned long end_pfn = node_end_pfn(i);
@@ -1318,7 +1352,7 @@ static void kmemleak_scan(void)
                        scan_block(page, page + 1, NULL, 1);
                }
        }
-       unlock_memory_hotplug();
+       put_online_mems();
 
        /*
         * Scanning the task stacks (may introduce false negatives).
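Usage note (editorial, not part of the patch): the new kmemleak_update_trace() lets a caller re-record an object's allocation backtrace at the point of use, which matters when objects come out of a preallocated pool and the original trace only points at the pool-refill path. A minimal sketch under that assumption follows; struct my_pool and my_pool_remove() are hypothetical names for illustration, only kmemleak_update_trace() comes from the patch above.

#include <linux/kmemleak.h>

struct my_pool;					/* hypothetical pool type */
extern void *my_pool_remove(struct my_pool *pool);	/* hypothetical helper */

static void *pool_take(struct my_pool *pool)
{
	void *obj = my_pool_remove(pool);

	if (obj)
		/* replace the refill-time backtrace with the caller's trace */
		kmemleak_update_trace(obj);
	return obj;
}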