drivers/gpu/drm/drm_mm.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple: the free list is currently
 * just an unordered stack of free regions, so there may be substantial
 * performance gains from a smarter structure such as an RB-tree, at least
 * under heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kzalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kzalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kzalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);

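/*
 * Usage sketch for drm_mm_pre_get() (illustrative only; the function, the
 * lock and the hole node below are hypothetical driver code): the per-manager
 * node cache is topped up while sleeping is still allowed, so that a later
 * allocation made under a spinlock can be served from the cache through the
 * atomic path of drm_mm_kmalloc().
 *
 *      int hypothetical_alloc(struct drm_mm *mm, struct drm_mm_node *hole,
 *                             spinlock_t *lock, unsigned long size)
 *      {
 *              struct drm_mm_node *node;
 *
 *              if (drm_mm_pre_get(mm))
 *                      return -ENOMEM;
 *
 *              spin_lock(lock);
 *              node = drm_mm_get_block_generic(hole, size, 0, 0, 1);
 *              spin_unlock(lock);
 *
 *              return node ? 0 : -ENOMEM;
 *      }
 */
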
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment,
                                 unsigned long color)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(node->allocated);

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);

        node->hole_follows = 0;
        if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
        struct drm_mm_node *hole;
        unsigned long end = node->start + node->size;
        unsigned long hole_start;
        unsigned long hole_end;

        BUG_ON(node == NULL);

        /* Find the relevant hole to add our node to */
        drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
                if (hole_start > node->start || hole_end < end)
                        continue;

                node->mm = mm;
                node->allocated = 1;

                INIT_LIST_HEAD(&node->hole_stack);
                list_add(&node->node_list, &hole->node_list);

                if (node->start == hole_start) {
                        hole->hole_follows = 0;
                        list_del_init(&hole->hole_stack);
                }

                node->hole_follows = 0;
                if (end != hole_end) {
                        list_add(&node->hole_stack, &mm->hole_stack);
                        node->hole_follows = 1;
                }

                return 0;
        }

        WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
             node->start, node->size);
        return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

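/*
 * Usage sketch for drm_mm_reserve_node() (illustrative only; fixed_start and
 * fixed_size are hypothetical values chosen by the caller): this is meant for
 * objects whose placement is already decided, e.g. a region handed over by
 * firmware. Fill in start and size of an otherwise cleared node and the
 * manager carves it out of the hole that covers that range.
 *
 *      struct drm_mm_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
 *
 *      if (!node)
 *              return -ENOMEM;
 *      node->start = fixed_start;
 *      node->size = fixed_size;
 *      ret = drm_mm_reserve_node(mm, node);
 *      if (ret)
 *              kfree(node);    (-ENOSPC: no free hole covers the range)
 */
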
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             unsigned long color,
                                             int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
                               unsigned long size, unsigned alignment,
                               unsigned long color)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_generic(mm, size, alignment,
                                               color, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment, color);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
                       unsigned long size, unsigned alignment)
{
        return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);

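/*
 * Usage sketch for the insert_node family (illustrative only; struct
 * hypothetical_bo is a made-up driver object): the node is embedded in a
 * larger, zero-initialized driver structure, so no separate drm_mm_node
 * allocation is needed.
 *
 *      struct hypothetical_bo {
 *              struct drm_mm_node node;
 *              unsigned long flags;
 *      };
 *
 *      static int hypothetical_bo_pin(struct drm_mm *mm,
 *                                     struct hypothetical_bo *bo,
 *                                     unsigned long size, unsigned align)
 *      {
 *              return drm_mm_insert_node(mm, &bo->node, size, align);
 *      }
 */
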
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long color,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);
        unsigned long adj_start = hole_start;
        unsigned long adj_end = hole_end;

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (adj_start < start)
                adj_start = start;
        if (adj_end > end)
                adj_end = end;

        if (mm->color_adjust)
                mm->color_adjust(hole_node, color, &adj_start, &adj_end);

        if (alignment) {
                unsigned tmp = adj_start % alignment;
                if (tmp)
                        adj_start += alignment - tmp;
        }

        if (adj_start == hole_start) {
                hole_node->hole_follows = 0;
                list_del(&hole_node->hole_stack);
        }

        node->start = adj_start;
        node->size = size;
        node->mm = mm;
        node->color = color;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > adj_end);
        BUG_ON(node->start + node->size > end);

        node->hole_follows = 0;
        if (__drm_mm_hole_node_start(node) < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        }
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long color,
                                                unsigned long start,
                                                unsigned long end,
                                                int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
                                   start, end);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
                                        unsigned long size, unsigned alignment, unsigned long color,
                                        unsigned long start, unsigned long end)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range_generic(mm,
                                                        size, alignment, color,
                                                        start, end, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node,
                                   size, alignment, color,
                                   start, end);
        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long start, unsigned long end)
{
        return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

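/*
 * Usage sketch for range-restricted insertion (illustrative only; the
 * 256 MiB window is a made-up hardware limit): useful when an object must
 * land inside an addressing window of the managed area.
 *
 *      ret = drm_mm_insert_node_in_range(mm, &bo->node, size, align,
 *                                        0, 256 * 1024 * 1024);
 *      if (ret == -ENOSPC)
 *              the window is full; evict something from it and retry,
 *              e.g. with the scanning helpers further down in this file
 */
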
/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(__drm_mm_hole_node_start(node) ==
                       __drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(__drm_mm_hole_node_start(node) !=
                       __drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

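/*
 * Usage sketch (illustrative only, continuing the hypothetical_bo example
 * above): with an embedded node, removal merely detaches the allocation from
 * the manager; the containing object stays owned by the caller.
 *
 *      static void hypothetical_bo_unpin(struct hypothetical_bo *bo)
 *      {
 *              if (bo->node.allocated)
 *                      drm_mm_remove_node(&bo->node);
 *      }
 */
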
/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        drm_mm_remove_node(node);

        spin_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                kfree(node);
        spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        start += alignment - tmp;
        }

        return end >= start + size;
}

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
                                               unsigned long size,
                                               unsigned alignment,
                                               unsigned long color,
                                               bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long adj_start;
        unsigned long adj_end;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
                                                        unsigned long size,
                                                        unsigned alignment,
                                                        unsigned long color,
                                                        unsigned long start,
                                                        unsigned long end,
                                                        bool best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long adj_start;
        unsigned long adj_end;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
                if (adj_start < start)
                        adj_start = start;
                if (adj_end > end)
                        adj_end = end;

                if (mm->color_adjust) {
                        mm->color_adjust(entry, color, &adj_start, &adj_end);
                        if (adj_end <= adj_start)
                                continue;
                }

                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;
        new->color = old->color;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

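/*
 * Usage sketch for drm_mm_replace_node() (illustrative only; old_bo and
 * new_bo are hypothetical objects with embedded nodes): the allocation,
 * including its offset and size, is transferred to the new node, and the old
 * node becomes unallocated and reusable.
 *
 *      drm_mm_replace_node(&old_bo->node, &new_bo->node);
 */
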
/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
                      unsigned long size,
                      unsigned alignment,
                      unsigned long color)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
                                 unsigned long size,
                                 unsigned alignment,
                                 unsigned long color,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_color = color;
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_end = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a suitable hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start, adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        adj_start = hole_start = drm_mm_hole_node_start(prev_node);
        adj_end = hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                if (adj_start < mm->scan_start)
                        adj_start = mm->scan_start;
                if (adj_end > mm->scan_end)
                        adj_end = mm->scan_end;
        }

        if (mm->color_adjust)
                mm->color_adjust(prev_node, mm->scan_color,
                                 &adj_start, &adj_end);

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                mm->scan_hit_end = hole_end;
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the reverse order from which they have been
 * added to the scan list, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        list_add(&node->node_list, &prev_node->node_list);

        return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
                node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

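/*
 * Usage sketch of the whole scanning API (illustrative only; the LRU list,
 * the scan_link member and hypothetical_evict() are made-up driver code).
 * Blocks are speculatively added in LRU order until a hole is found, then
 * every added block is removed again; only the blocks for which
 * drm_mm_scan_remove_block() returns one need to be evicted. Note that
 * list_add() prepends, so walking eviction_list forwards visits the blocks
 * in reverse order of addition, as required above.
 *
 *      drm_mm_init_scan(mm, size, alignment, color);
 *      list_for_each_entry(bo, &lru, lru_link) {
 *              list_add(&bo->scan_link, &eviction_list);
 *              if (drm_mm_scan_add_block(&bo->node))
 *                      break;
 *      }
 *
 *      list_for_each_entry_safe(bo, tmp, &eviction_list, scan_link) {
 *              list_del(&bo->scan_link);
 *              if (drm_mm_scan_remove_block(&bo->node))
 *                      hypothetical_evict(bo);
 *      }
 */
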
int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_lock_init(&mm->unused_lock);

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

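/*
 * Usage sketch for setting up a manager (illustrative only; the 16 MiB
 * aperture size is made up): a driver embeds a struct drm_mm, initializes it
 * once with the range it manages, and tears it down with drm_mm_takedown()
 * below after every node has been removed.
 *
 *      struct drm_mm mm;
 *
 *      drm_mm_init(&mm, 0, 16 * 1024 * 1024);
 *      ... insert and remove nodes at will ...
 *      drm_mm_takedown(&mm);
 */
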
void drm_mm_takedown(struct drm_mm *mm)
{
        struct drm_mm_node *entry, *next;

        if (WARN(!list_empty(&mm->head_node.node_list),
                 "Memory manager not clean. Delaying takedown\n")) {
                return;
        }

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
                                       const char *prefix)
{
        unsigned long hole_start, hole_end, hole_size;

        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end,
                        hole_size);
                return hole_size;
        }

        return 0;
}

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_debug_hole(&mm->head_node, prefix);

        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;
                total_free += drm_mm_debug_hole(entry, prefix);
        }
        total = total_free + total_used;

        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
        unsigned long hole_start, hole_end, hole_size;

        if (entry->hole_follows) {
                hole_start = drm_mm_hole_node_start(entry);
                hole_end = drm_mm_hole_node_end(entry);
                hole_size = hole_end - hole_start;
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                hole_start, hole_end, hole_size);
                return hole_size;
        }

        return 0;
}

int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;

        total_free += drm_mm_dump_hole(m, &mm->head_node);

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                                entry->start, entry->start + entry->size,
                                entry->size);
                total_used += entry->size;
                total_free += drm_mm_dump_hole(m, entry);
        }
        total = total_free + total_used;

        seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif