/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_internal.h"
#include "drm_legacy.h"

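/** Bookkeeping entry linking a userspace VMA to drm_device::vmalist */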
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

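/**
 * Compute the page protection for an I/O mapping (registers or frame
 * buffer), applying the architecture-specific caching attributes:
 * write-combining where the map allows it, uncached otherwise.
 */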
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

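/**
 * Compute the page protection for DMA memory. On PowerPC without coherent
 * caches the mapping is made non-cached/write-combined; elsewhere the
 * default protection is returned unchanged.
 */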
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp = pgprot_noncached_wc(tmp);
#endif
	return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and, if it's AGP memory, find the real physical page to
 * map, get the page, increment its use count and store it in \p vmf->page.
 */
#if IS_ENABLED(CONFIG_AGP)
static int drm_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp)
		goto vm_fault_error;

	if (!dev->agp->cant_use_aperture)
		goto vm_fault_error;
	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = vmf->address - vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else
static int drm_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif

/**
 * \c fault method for shared virtual memory.
 *
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page,
 * increment its use count and store it in \p vmf->page.
 */
static int drm_vm_shm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last person to close a mapping and
 * it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static int drm_vm_dma_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
					/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static int drm_vm_sg_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = vmf->address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

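/**
 * Record a new userspace mapping.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * Adds an entry for \p vma to drm_device::vmalist. The caller must hold
 * drm_device::struct_mutex.
 */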
static void drm_vm_open_locked(struct drm_device *dev,
			       struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

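/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Takes drm_device::struct_mutex and records the new mapping via
 * drm_vm_open_locked().
 */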
static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

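/**
 * Drop the tracking entry for a mapping that is going away.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * Unlinks and frees the drm_device::vmalist entry for \p vma. The caller
 * must hold drm_device::struct_mutex.
 */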
static void drm_vm_close_locked(struct drm_device *dev,
				struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Searches for the \p vma entry in drm_device::vmalist, unlinks it, and
 * frees it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Checks that the mapping length matches the DMA buffer area, enforces
 * read-only mappings where required, sets the virtual memory area operations
 * structure to drm_vm_dma_ops, and registers the mapping via
 * drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

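/**
 * Return the architecture-specific offset added to a map's physical address
 * before it is handed to io_remap_pfn_range(): the dense memory base of the
 * PCI hose on Alpha, zero everywhere else.
 */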
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/**
 * mmap a DRM device memory map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls drm_mmap_dma(). Otherwise searches for the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally registers the mapping via drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if IS_ENABLED(CONFIG_AGP)
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in fault()
			 */
#if defined(__powerpc__)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
	/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

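/**
 * mmap entry point for legacy (non-GEM) DRM mappings.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Fails if the device has been unplugged, otherwise takes
 * drm_device::struct_mutex and dispatches to drm_mmap_locked(). Userspace
 * typically reaches this path by calling mmap() on the DRM device node with
 * the offset token previously returned for a legacy map (e.g. via libdrm's
 * drmMap()).
 */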
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_legacy_mmap);

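/**
 * Free all remaining drm_device::vmalist entries.
 *
 * \param dev DRM device.
 *
 * Only needed for legacy drivers that still use the vmalist bookkeeping.
 */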
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}