/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"

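/* TTM memory-type manager for VRAM.  Allocation and free are handed
 * straight to the nvkm_ram backend; the manager itself only stashes
 * the fb subdevice in man->priv.
 */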
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	man->priv = fb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}

static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	/* tear down both possible GPU VM mappings of this node */
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;

	nvkm_mem_node_cleanup(mem->mm_node);
	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (drm->device.info.ram_size == 0)
		return -ENOMEM;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
			     mem->page_alignment << PAGE_SHIFT, size_nc,
			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		/* a NULL mm_node with a 0 return tells TTM there was no
		 * space here, so it can try another placement */
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
};

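/* TTM memory-type manager for the GART aperture.  GPU virtual-address
 * placement happens later, at bind time, so _new only allocates the
 * nvkm_mem node and derives a memtype from the buffer's tile flags on
 * the chipsets that need one.
 */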
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		break;
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
			drm->device.info.family);
		break;
	}

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};

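/* Fallback manager for the nv04-family MMU.  Unlike the generic GART
 * manager above, pages are reserved in the shared nv04_mmu VM at
 * allocation time via nvkm_vm_get()/nvkm_vm_put().
 */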
#include <subdev/mmu/nv04.h>

static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;

	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;

	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};

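/* mmap() entry point: offsets below DRM_FILE_PAGE_OFFSET belong to
 * legacy maps handled by the DRM core; everything above maps a TTM
 * buffer object.
 */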
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_legacy_mmap(filp, vma);

	return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}

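/* Device-level setup: copy AGP parameters (when present), pick a DMA
 * mask, initialise the TTM globals and bo device, then bring up one
 * range manager per memory type (VRAM and GART).
 */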
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct nvkm_pci *pci = device->pci;
	struct drm_device *dev = drm->dev;
	u8 bits;
	int ret;

	if (pci && pci->agp.bridge) {
		drm->agp.bridge = pci->agp.bridge;
		drm->agp.base = pci->agp.base;
		drm->agp.size = pci->agp.size;
		drm->agp.cma = pci->agp.cma;
	}

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nvxx_device(&drm->device)->func->pci) {
		if (drm->agp.bridge ||
		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
			bits = 32;

		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
		if (ret)
			return ret;

		ret = pci_set_consistent_dma_mask(dev->pdev,
						  DMA_BIT_MASK(bits));
		if (ret)
			/* fall back to a 32-bit coherent mask */
			pci_set_consistent_dma_mask(dev->pdev,
						    DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
					 device->func->resource_size(device, 1));

	/* GART init */
	if (!drm->agp.bridge) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}

void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
}
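
/* Rough usage sketch (illustrative only, not part of this file): the
 * driver load/unload path is expected to pair these entry points as
 *
 *	ret = nouveau_ttm_init(drm);
 *	if (ret)
 *		return ret;
 *	...
 *	nouveau_ttm_fini(drm);
 *
 * with the real call sites living in nouveau_drm.c.
 */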