]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/char/agp/intel-gtt.c
agp/intel: Promote warning about failure to setup flush to error.
[karo-tx-linux.git] / drivers / char / agp / intel-gtt.c
1 /*
2  * Intel GTT (Graphics Translation Table) routines
3  *
4  * Caveat: This driver implements the linux agp interface, but this is far from
5  * a agp driver! GTT support ended up here for purely historical reasons: The
6  * old userspace intel graphics drivers needed an interface to map memory into
7  * the GTT. And the drm provides a default interface for graphic devices sitting
8  * on an agp port. So it made sense to fake the GTT support as an agp port to
9  * avoid having to create a new api.
10  *
11  * With gem this does not make much sense anymore, just needlessly complicates
12  * the code. But as long as the old graphics stack is still support, it's stuck
13  * here.
14  *
15  * /fairy-tale-mode off
16  */
17
18 /*
19  * If we have Intel graphics, we're not going to have anything other than
20  * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
21  * on the Intel IOMMU support (CONFIG_DMAR).
22  * Only newer chipsets need to bother with this, of course.
23  */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1       /* selects the scatterlist-based DMT/DMA paths below */
#endif
27
28 /* Max amount of stolen space, anything above will be returned to Linux */
/* Tunable cap, in bytes; stolen memory above this is trimmed (see
 * intel_i830_init_gtt_entries()). */
int intel_max_stolen = 32 * 1024 * 1024;
EXPORT_SYMBOL(intel_max_stolen);
31
/* i810 aperture modes: {size in MB, number of GTT entries, page order}. */
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
        {64, 16384, 4},
        /* The 32M mode still requires a 64k gatt */
        {32, 8192, 4}
};
38
/* Driver-private memory types; also indices into intel_i810_masks[]. */
#define AGP_DCACHE_MEMORY       1
#define AGP_PHYS_MEMORY         2
#define INTEL_AGP_CACHED_MEMORY 3
42
static struct gatt_mask intel_i810_masks[] =
{
        /* Indexed by the memory types above; .mask holds the PTE flag bits
         * that mask_memory() ORs into the page's physical address. */
        {.mask = I810_PTE_VALID, .type = 0},
        {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
        {.mask = I810_PTE_VALID, .type = 0},
        {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
         .type = INTEL_AGP_CACHED_MEMORY}
};
51
/* Driver-wide state shared by all routines in this file. */
static struct _intel_private {
        struct pci_dev *pcidev; /* device one */
        u8 __iomem *registers;  /* mapped MMIO register window */
        u32 __iomem *gtt;               /* I915G */
        int num_dcache_entries; /* i810 dedicated video ram entries (0 if none) */
        /* gtt_entries is the number of gtt entries that are already mapped
         * to stolen memory.  Stolen memory is larger than the memory mapped
         * through gtt_entries, as it includes some reserved space for the BIOS
         * popup and for the GTT.
         */
        int gtt_entries;                        /* i830+ */
        int gtt_total_size;
        union {
                void __iomem *i9xx_flush_page;
                void *i8xx_flush_page;  /* kmap()ed chipset-flush page (i8xx) */
        };
        struct page *i8xx_page;         /* backing page for i8xx_flush_page */
        struct resource ifp_resource;
        int resource_valid;
} intel_private;
72
73 #ifdef USE_PCI_DMA_API
74 static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
75 {
76         *ret = pci_map_page(intel_private.pcidev, page, 0,
77                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
78         if (pci_dma_mapping_error(intel_private.pcidev, *ret))
79                 return -EINVAL;
80         return 0;
81 }
82
/* Release the DMA mapping created by intel_agp_map_page(). */
static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
{
        pci_unmap_page(intel_private.pcidev, dma,
                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
88
89 static void intel_agp_free_sglist(struct agp_memory *mem)
90 {
91         struct sg_table st;
92
93         st.sgl = mem->sg_list;
94         st.orig_nents = st.nents = mem->page_count;
95
96         sg_free_table(&st);
97
98         mem->sg_list = NULL;
99         mem->num_sg = 0;
100 }
101
102 static int intel_agp_map_memory(struct agp_memory *mem)
103 {
104         struct sg_table st;
105         struct scatterlist *sg;
106         int i;
107
108         DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
109
110         if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
111                 return -ENOMEM;
112
113         mem->sg_list = sg = st.sgl;
114
115         for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
116                 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
117
118         mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
119                                  mem->page_count, PCI_DMA_BIDIRECTIONAL);
120         if (unlikely(!mem->num_sg)) {
121                 intel_agp_free_sglist(mem);
122                 return -ENOMEM;
123         }
124         return 0;
125 }
126
/* Undo intel_agp_map_memory(): unmap the pages and free the scatterlist. */
static void intel_agp_unmap_memory(struct agp_memory *mem)
{
        DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

        pci_unmap_sg(intel_private.pcidev, mem->sg_list,
                     mem->page_count, PCI_DMA_BIDIRECTIONAL);
        intel_agp_free_sglist(mem);
}
135
/*
 * Write the DMA addresses of an sg-mapped agp_memory into the GTT,
 * one PTE per PAGE_SIZE chunk, starting at entry pg_start.
 */
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
                                        off_t pg_start, int mask_type)
{
        struct scatterlist *sg;
        int i, j;

        j = pg_start;

        WARN_ON(!mem->num_sg);

        if (mem->num_sg == mem->page_count) {
                /* no merging happened: exactly one sg entry per page */
                for_each_sg(mem->sg_list, sg, mem->page_count, i) {
                        writel(agp_bridge->driver->mask_memory(agp_bridge,
                                        sg_dma_address(sg), mask_type),
                                        intel_private.gtt+j);
                        j++;
                }
        } else {
                /* sg may merge pages, but we have to separate
                 * per-page addr for GTT */
                unsigned int len, m;

                for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
                        len = sg_dma_len(sg) / PAGE_SIZE;
                        for (m = 0; m < len; m++) {
                                writel(agp_bridge->driver->mask_memory(agp_bridge,
                                                                       sg_dma_address(sg) + m * PAGE_SIZE,
                                                                       mask_type),
                                       intel_private.gtt+j);
                                j++;
                        }
                }
        }
        readl(intel_private.gtt+j-1);   /* PCI posting: flush the PTE writes */
}
171
172 #else
173
174 static void intel_agp_insert_sg_entries(struct agp_memory *mem,
175                                         off_t pg_start, int mask_type)
176 {
177         int i, j;
178         u32 cache_bits = 0;
179
180         if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
181             agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
182         {
183                 cache_bits = I830_PTE_SYSTEM_CACHED;
184         }
185
186         for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
187                 writel(agp_bridge->driver->mask_memory(agp_bridge,
188                                 page_to_phys(mem->pages[i]), mask_type),
189                        intel_private.gtt+j);
190         }
191
192         readl(intel_private.gtt+j-1);
193 }
194
195 #endif
196
197 static int intel_i810_fetch_size(void)
198 {
199         u32 smram_miscc;
200         struct aper_size_info_fixed *values;
201
202         pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
203         values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
204
205         if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
206                 dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
207                 return 0;
208         }
209         if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
210                 agp_bridge->current_size = (void *) (values + 1);
211                 agp_bridge->aperture_size_idx = 1;
212                 return values[1].size;
213         } else {
214                 agp_bridge->current_size = (void *) (values);
215                 agp_bridge->aperture_size_idx = 0;
216                 return values[0].size;
217         }
218
219         return 0;
220 }
221
/*
 * Program the i810: map the MMIO registers (if not already mapped), detect
 * dedicated video ram, enable the GTT and optionally point every entry at
 * the scratch page.  Returns 0 on success, -ENOMEM if ioremap fails.
 */
static int intel_i810_configure(void)
{
        struct aper_size_info_fixed *current_size;
        u32 temp;
        int i;

        current_size = A_SIZE_FIX(agp_bridge->current_size);

        if (!intel_private.registers) {
                pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
                temp &= 0xfff80000;     /* mask off the low 19 bits: 512KB-aligned base */

                intel_private.registers = ioremap(temp, 128 * 4096);
                if (!intel_private.registers) {
                        dev_err(&intel_private.pcidev->dev,
                                "can't remap memory\n");
                        return -ENOMEM;
                }
        }

        if ((readl(intel_private.registers+I810_DRAM_CTL)
                & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
                /* This will need to be dynamically assigned */
                dev_info(&intel_private.pcidev->dev,
                         "detected 4MB dedicated video ram\n");
                intel_private.num_dcache_entries = 1024;
        }
        pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
        agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
        /* enable the page table */
        writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
        readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

        if (agp_bridge->driver->needs_scratch_page) {
                /* fill the whole GTT with the scratch page */
                for (i = 0; i < current_size->num_entries; i++) {
                        writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
        }
        global_cache_flush();
        return 0;
}
263
/* Disable the GTT and unmap the i810 MMIO register window. */
static void intel_i810_cleanup(void)
{
        writel(0, intel_private.registers+I810_PGETBL_CTL);
        readl(intel_private.registers); /* PCI Posting. */
        iounmap(intel_private.registers);
}
270
271 static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
272 {
273         return;
274 }
275
276 /* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
        struct page *page;

        /* order-2 allocation: four contiguous pages, for the ARGB cursor */
        page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
        if (page == NULL)
                return NULL;

        /* mark the pages uncached; undo and free everything on failure */
        if (set_pages_uc(page, 4) < 0) {
                set_pages_wb(page, 4);
                __free_pages(page, 2);
                return NULL;
        }
        /* extra reference, dropped again in i8xx_destroy_pages() */
        get_page(page);
        atomic_inc(&agp_bridge->current_memory_agp);
        return page;
}
294
/* Free the four cursor pages allocated by i8xx_alloc_pages(). */
static void i8xx_destroy_pages(struct page *page)
{
        if (page == NULL)
                return;

        set_pages_wb(page, 4);  /* restore write-back caching first */
        put_page(page);         /* drop the i8xx_alloc_pages() reference */
        __free_pages(page, 2);
        atomic_dec(&agp_bridge->current_memory_agp);
}
305
306 static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
307                                         int type)
308 {
309         if (type < AGP_USER_TYPES)
310                 return type;
311         else if (type == AGP_USER_CACHED_MEMORY)
312                 return INTEL_AGP_CACHED_MEMORY;
313         else
314                 return 0;
315 }
316
/*
 * Bind the pages of @mem into the i810 GTT starting at entry pg_start.
 * Returns 0 on success, -EBUSY if any target PTE is already in use,
 * -EINVAL on overflow, type mismatch or an unknown mask type.
 */
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
                                int type)
{
        int i, j, num_entries;
        void *temp;
        int ret = -EINVAL;
        int mask_type;

        /* nothing to do; note this still lands on out:/out_err: below */
        if (mem->page_count == 0)
                goto out;

        temp = agp_bridge->current_size;
        num_entries = A_SIZE_FIX(temp)->num_entries;

        if ((pg_start + mem->page_count) > num_entries)
                goto out_err;


        /* refuse to overwrite PTEs that are already bound */
        for (j = pg_start; j < (pg_start + mem->page_count); j++) {
                if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
                        ret = -EBUSY;
                        goto out_err;
                }
        }

        if (type != mem->type)
                goto out_err;

        mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

        switch (mask_type) {
        case AGP_DCACHE_MEMORY:
                if (!mem->is_flushed)
                        global_cache_flush();
                /* dcache entries map the dedicated video ram linearly */
                for (i = pg_start; i < (pg_start + mem->page_count); i++) {
                        writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
                               intel_private.registers+I810_PTE_BASE+(i*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting */
                break;
        case AGP_PHYS_MEMORY:
        case AGP_NORMAL_MEMORY:
                if (!mem->is_flushed)
                        global_cache_flush();
                for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                        writel(agp_bridge->driver->mask_memory(agp_bridge,
                                        page_to_phys(mem->pages[i]), mask_type),
                               intel_private.registers+I810_PTE_BASE+(j*4));
                }
                readl(intel_private.registers+I810_PTE_BASE+((j-1)*4)); /* PCI posting */
                break;
        default:
                goto out_err;
        }

out:
        ret = 0;
out_err:
        mem->is_flushed = true;
        return ret;
}
378
379 static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
380                                 int type)
381 {
382         int i;
383
384         if (mem->page_count == 0)
385                 return 0;
386
387         for (i = pg_start; i < (mem->page_count + pg_start); i++) {
388                 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
389         }
390         readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
391
392         return 0;
393 }
394
395 /*
396  * The i810/i830 requires a physical address to program its mouse
397  * pointer into hardware.
398  * However the Xserver still writes to it through the agp aperture.
399  */
400 static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
401 {
402         struct agp_memory *new;
403         struct page *page;
404
405         switch (pg_count) {
406         case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
407                 break;
408         case 4:
409                 /* kludge to get 4 physical pages for ARGB cursor */
410                 page = i8xx_alloc_pages();
411                 break;
412         default:
413                 return NULL;
414         }
415
416         if (page == NULL)
417                 return NULL;
418
419         new = agp_create_memory(pg_count);
420         if (new == NULL)
421                 return NULL;
422
423         new->pages[0] = page;
424         if (pg_count == 4) {
425                 /* kludge to get 4 physical pages for ARGB cursor */
426                 new->pages[1] = new->pages[0] + 1;
427                 new->pages[2] = new->pages[1] + 1;
428                 new->pages[3] = new->pages[2] + 1;
429         }
430         new->page_count = pg_count;
431         new->num_scratch_pages = pg_count;
432         new->type = AGP_PHYS_MEMORY;
433         new->physical = page_to_phys(new->pages[0]);
434         return new;
435 }
436
/*
 * Allocate agp memory of a driver-private type: either the i810's
 * dedicated video ram (dcache) or physical cursor memory.
 */
static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
        struct agp_memory *new;

        if (type == AGP_DCACHE_MEMORY) {
                /* dcache is all-or-nothing: must cover every dcache entry */
                if (pg_count != intel_private.num_dcache_entries)
                        return NULL;

                new = agp_create_memory(1);
                if (new == NULL)
                        return NULL;

                new->type = AGP_DCACHE_MEMORY;
                new->page_count = pg_count;
                new->num_scratch_pages = 0;
                /* dcache has no backing struct pages; drop the page array */
                agp_free_page_array(new);
                return new;
        }
        if (type == AGP_PHYS_MEMORY)
                return alloc_agpphysmem_i8xx(pg_count, type);
        return NULL;
}
459
/* Free agp memory allocated by intel_i810_alloc_by_type(). */
static void intel_i810_free_by_type(struct agp_memory *curr)
{
        agp_free_key(curr->key);
        if (curr->type == AGP_PHYS_MEMORY) {
                /* page_count == 4 means an ARGB-cursor allocation */
                if (curr->page_count == 4)
                        i8xx_destroy_pages(curr->pages[0]);
                else {
                        agp_bridge->driver->agp_destroy_page(curr->pages[0],
                                                             AGP_PAGE_DESTROY_UNMAP);
                        agp_bridge->driver->agp_destroy_page(curr->pages[0],
                                                             AGP_PAGE_DESTROY_FREE);
                }
                agp_free_page_array(curr);
        }
        kfree(curr);
}
476
477 static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
478                                             dma_addr_t addr, int type)
479 {
480         /* Type checking must be done elsewhere */
481         return addr | bridge->driver->masks[type].mask;
482 }
483
/* i830+ aperture modes: {size in MB, number of GTT entries, page order}. */
static struct aper_size_info_fixed intel_i830_sizes[] =
{
        {128, 32768, 5},
        /* The 64M mode still requires a 128k gatt */
        {64, 16384, 5},
        {256, 65536, 6},
        {512, 131072, 7},
};
492
/*
 * Work out how much stolen memory is usable through the GTT and store the
 * result (as a number of 4K entries) in intel_private.gtt_entries.
 *
 * Two chipset-specific quantities feed the calculation:
 *   - "size": reserved space (in KB) at the top of stolen memory (GTT +
 *     BIOS popup) that must be subtracted, and
 *   - the stolen-memory amount itself, decoded from the GMCH control
 *     register (or, on SandyBridge, from a new register at 0x50).
 */
static void intel_i830_init_gtt_entries(void)
{
        u16 gmch_ctrl;
        int gtt_entries = 0;
        u8 rdct;
        int local = 0;  /* set when memory is i830 "local" (dedicated) ram */
        static const int ddt[4] = { 0, 16, 32, 64 };
        int size; /* reserved space (in kb) at the top of stolen memory */

        pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

        if (IS_I965) {
                u32 pgetbl_ctl;
                pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

                /* The 965 has a field telling us the size of the GTT,
                 * which may be larger than what is necessary to map the
                 * aperture.
                 */
                switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
                case I965_PGETBL_SIZE_128KB:
                        size = 128;
                        break;
                case I965_PGETBL_SIZE_256KB:
                        size = 256;
                        break;
                case I965_PGETBL_SIZE_512KB:
                        size = 512;
                        break;
                case I965_PGETBL_SIZE_1MB:
                        size = 1024;
                        break;
                case I965_PGETBL_SIZE_2MB:
                        size = 2048;
                        break;
                case I965_PGETBL_SIZE_1_5MB:
                        size = 1024 + 512;
                        break;
                default:
                        dev_info(&intel_private.pcidev->dev,
                                 "unknown page table size, assuming 512KB\n");
                        size = 512;
                }
                size += 4; /* add in BIOS popup space */
        } else if (IS_G33 && !IS_PINEVIEW) {
        /* G33's GTT size defined in gmch_ctrl */
                switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
                case G33_PGETBL_SIZE_1M:
                        size = 1024;
                        break;
                case G33_PGETBL_SIZE_2M:
                        size = 2048;
                        break;
                default:
                        dev_info(&agp_bridge->dev->dev,
                                 "unknown page table size 0x%x, assuming 512KB\n",
                                (gmch_ctrl & G33_PGETBL_SIZE_MASK));
                        size = 512;
                }
                size += 4;      /* BIOS popup */
        } else if (IS_G4X || IS_PINEVIEW) {
                /* On 4 series hardware, GTT stolen is separate from graphics
                 * stolen, ignore it in stolen gtt entries counting.  However,
                 * 4KB of the stolen memory doesn't get mapped to the GTT.
                 */
                size = 4;
        } else {
                /* On previous hardware, the GTT size was just what was
                 * required to map the aperture.
                 */
                size = agp_bridge->driver->fetch_size() + 4;
        }

        if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
            agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
                switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
                case I830_GMCH_GMS_STOLEN_512:
                        gtt_entries = KB(512) - KB(size);
                        break;
                case I830_GMCH_GMS_STOLEN_1024:
                        gtt_entries = MB(1) - KB(size);
                        break;
                case I830_GMCH_GMS_STOLEN_8192:
                        gtt_entries = MB(8) - KB(size);
                        break;
                case I830_GMCH_GMS_LOCAL:
                        /* dedicated ("local") ram: amount comes from the
                         * RDRAM channel type register, not gmch_ctrl */
                        rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
                        gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
                                        MB(ddt[I830_RDRAM_DDT(rdct)]);
                        local = 1;
                        break;
                default:
                        gtt_entries = 0;
                        break;
                }
        } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
                   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
                /*
                 * SandyBridge has new memory control reg at 0x50.w
                 */
                u16 snb_gmch_ctl;
                pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
                switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
                case SNB_GMCH_GMS_STOLEN_32M:
                        gtt_entries = MB(32) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_64M:
                        gtt_entries = MB(64) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_96M:
                        gtt_entries = MB(96) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_128M:
                        gtt_entries = MB(128) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_160M:
                        gtt_entries = MB(160) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_192M:
                        gtt_entries = MB(192) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_224M:
                        gtt_entries = MB(224) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_256M:
                        gtt_entries = MB(256) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_288M:
                        gtt_entries = MB(288) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_320M:
                        gtt_entries = MB(320) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_352M:
                        gtt_entries = MB(352) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_384M:
                        gtt_entries = MB(384) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_416M:
                        gtt_entries = MB(416) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_448M:
                        gtt_entries = MB(448) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_480M:
                        gtt_entries = MB(480) - KB(size);
                        break;
                case SNB_GMCH_GMS_STOLEN_512M:
                        gtt_entries = MB(512) - KB(size);
                        break;
                }
        } else {
                /* i855 and later share the I855_GMCH_GMS encoding; some
                 * values are only valid on specific generations, checked
                 * below per case. */
                switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
                case I855_GMCH_GMS_STOLEN_1M:
                        gtt_entries = MB(1) - KB(size);
                        break;
                case I855_GMCH_GMS_STOLEN_4M:
                        gtt_entries = MB(4) - KB(size);
                        break;
                case I855_GMCH_GMS_STOLEN_8M:
                        gtt_entries = MB(8) - KB(size);
                        break;
                case I855_GMCH_GMS_STOLEN_16M:
                        gtt_entries = MB(16) - KB(size);
                        break;
                case I855_GMCH_GMS_STOLEN_32M:
                        gtt_entries = MB(32) - KB(size);
                        break;
                case I915_GMCH_GMS_STOLEN_48M:
                        /* Check it's really I915G */
                        if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
                                gtt_entries = MB(48) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case I915_GMCH_GMS_STOLEN_64M:
                        /* Check it's really I915G */
                        if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
                                gtt_entries = MB(64) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case G33_GMCH_GMS_STOLEN_128M:
                        if (IS_G33 || IS_I965 || IS_G4X)
                                gtt_entries = MB(128) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case G33_GMCH_GMS_STOLEN_256M:
                        if (IS_G33 || IS_I965 || IS_G4X)
                                gtt_entries = MB(256) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case INTEL_GMCH_GMS_STOLEN_96M:
                        if (IS_I965 || IS_G4X)
                                gtt_entries = MB(96) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case INTEL_GMCH_GMS_STOLEN_160M:
                        if (IS_I965 || IS_G4X)
                                gtt_entries = MB(160) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case INTEL_GMCH_GMS_STOLEN_224M:
                        if (IS_I965 || IS_G4X)
                                gtt_entries = MB(224) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                case INTEL_GMCH_GMS_STOLEN_352M:
                        if (IS_I965 || IS_G4X)
                                gtt_entries = MB(352) - KB(size);
                        else
                                gtt_entries = 0;
                        break;
                default:
                        gtt_entries = 0;
                        break;
                }
        }
        /* trim stolen (but not local) memory to the intel_max_stolen cap */
        if (!local && gtt_entries > intel_max_stolen) {
                dev_info(&agp_bridge->dev->dev,
                         "detected %dK stolen memory, trimming to %dK\n",
                         gtt_entries / KB(1), intel_max_stolen / KB(1));
                gtt_entries = intel_max_stolen / KB(4);
        } else if (gtt_entries > 0) {
                dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
                       gtt_entries / KB(1), local ? "local" : "stolen");
                gtt_entries /= KB(4);   /* bytes -> 4K GTT entries */
        } else {
                dev_info(&agp_bridge->dev->dev,
                       "no pre-allocated video memory detected\n");
                gtt_entries = 0;
        }

        intel_private.gtt_entries = gtt_entries;
}
734
/* Tear down the chipset-flush page set up by intel_i830_setup_flush(). */
static void intel_i830_fini_flush(void)
{
        kunmap(intel_private.i8xx_page);        /* undo the kmap() first */
        intel_private.i8xx_flush_page = NULL;
        unmap_page_from_agp(intel_private.i8xx_page);

        __free_page(intel_private.i8xx_page);
        intel_private.i8xx_page = NULL;
}
744
745 static void intel_i830_setup_flush(void)
746 {
747         /* return if we've already set the flush mechanism up */
748         if (intel_private.i8xx_page)
749                 return;
750
751         intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
752         if (!intel_private.i8xx_page)
753                 return;
754
755         intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
756         if (!intel_private.i8xx_flush_page)
757                 intel_i830_fini_flush();
758 }
759
760 /* The chipset_flush interface needs to get data that has already been
761  * flushed out of the CPU all the way out to main memory, because the GPU
762  * doesn't snoop those buffers.
763  *
764  * The 8xx series doesn't have the same lovely interface for flushing the
765  * chipset write buffers that the later chips do. According to the 865
766  * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
767  * that buffer out, we just fill 1KB and clflush it out, on the assumption
768  * that it'll push whatever was in there out.  It appears to work.
769  */
770 static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
771 {
772         unsigned int *pg = intel_private.i8xx_flush_page;
773
774         memset(pg, 0, 1024);
775
776         if (cpu_has_clflush)
777                 clflush_cache_range(pg, 1024);
778         else if (wbinvd_on_all_cpus() != 0)
779                 printk(KERN_ERR "Timed out waiting for cache flush.\n");
780 }
781
782 /* The intel i830 automatically initializes the agp aperture during POST.
783  * Use the memory already set aside for in the GTT.
784  */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
        int page_order;
        struct aper_size_info_fixed *size;
        int num_entries;
        u32 temp;

        size = agp_bridge->current_size;
        page_order = size->page_order;
        num_entries = size->num_entries;
        /* no kernel-allocated table: the GTT was set up by the BIOS */
        agp_bridge->gatt_table_real = NULL;

        pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
        temp &= 0xfff80000;     /* mask off the low 19 bits: 512KB-aligned base */

        intel_private.registers = ioremap(temp, 128 * 4096);
        if (!intel_private.registers)
                return -ENOMEM;

        /* bus address of the BIOS-established page table */
        temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
        global_cache_flush();   /* FIXME: ?? */

        /* we have to call this as early as possible after the MMIO base address is known */
        intel_i830_init_gtt_entries();

        agp_bridge->gatt_table = NULL;

        agp_bridge->gatt_bus_addr = temp;

        return 0;
}
816
/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	/* Nothing to free: the GTT lives in BIOS-reserved stolen memory and
	 * was never allocated by us (see intel_i830_create_gatt_table). */
	return 0;
}
824
825 static int intel_i830_fetch_size(void)
826 {
827         u16 gmch_ctrl;
828         struct aper_size_info_fixed *values;
829
830         values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
831
832         if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
833             agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
834                 /* 855GM/852GM/865G has 128MB aperture size */
835                 agp_bridge->current_size = (void *) values;
836                 agp_bridge->aperture_size_idx = 0;
837                 return values[0].size;
838         }
839
840         pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
841
842         if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
843                 agp_bridge->current_size = (void *) values;
844                 agp_bridge->aperture_size_idx = 0;
845                 return values[0].size;
846         } else {
847                 agp_bridge->current_size = (void *) (values + 1);
848                 agp_bridge->aperture_size_idx = 1;
849                 return values[1].size;
850         }
851
852         return 0;
853 }
854
/* Enable the GMCH, program the page-table base and scrub the non-stolen
 * part of the GTT to point at the scratch page.  Always returns 0. */
static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	/* GMADDR is the graphics aperture base address. */
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

	/* Entries below gtt_entries map BIOS-stolen memory and must be left
	 * alone; everything above gets the scratch page. */
	if (agp_bridge->driver->needs_scratch_page) {
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}
886
/* Undo the MMIO mapping made by intel_i830_create_gatt_table. */
static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}
891
/* Bind mem's pages into the GTT at pg_start by writing PTEs directly.
 * Returns 0 on success (a zero-page insert is a success) or -EINVAL. */
static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	/* The low GTT entries map BIOS-stolen memory; refuse to touch them. */
	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	/* Range must fit inside the aperture. */
	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since its read only,
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	/* Write one PTE per page; mask_memory merges the valid/cache bits. */
	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	/* Read back the last PTE to flush the posted PCI writes. */
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));

out:
	ret = 0;
out_err:
	/* NOTE(review): set even on error paths that never flushed —
	 * looks intentional (mirrors intel_i915_insert_entries), verify. */
	mem->is_flushed = true;
	return ret;
}
948
949 static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
950                                      int type)
951 {
952         int i;
953
954         if (mem->page_count == 0)
955                 return 0;
956
957         if (pg_start < intel_private.gtt_entries) {
958                 dev_info(&intel_private.pcidev->dev,
959                          "trying to disable local/stolen memory\n");
960                 return -EINVAL;
961         }
962
963         for (i = pg_start; i < (mem->page_count + pg_start); i++) {
964                 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
965         }
966         readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
967
968         return 0;
969 }
970
971 static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
972 {
973         if (type == AGP_PHYS_MEMORY)
974                 return alloc_agpphysmem_i8xx(pg_count, type);
975         /* always return NULL for other allocation types for now */
976         return NULL;
977 }
978
979 static int intel_alloc_chipset_flush_resource(void)
980 {
981         int ret;
982         ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
983                                      PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
984                                      pcibios_align_resource, agp_bridge->dev);
985
986         return ret;
987 }
988
989 static void intel_i915_setup_chipset_flush(void)
990 {
991         int ret;
992         u32 temp;
993
994         pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
995         if (!(temp & 0x1)) {
996                 intel_alloc_chipset_flush_resource();
997                 intel_private.resource_valid = 1;
998                 pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
999         } else {
1000                 temp &= ~1;
1001
1002                 intel_private.resource_valid = 1;
1003                 intel_private.ifp_resource.start = temp;
1004                 intel_private.ifp_resource.end = temp + PAGE_SIZE;
1005                 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1006                 /* some BIOSes reserve this area in a pnp some don't */
1007                 if (ret)
1008                         intel_private.resource_valid = 0;
1009         }
1010 }
1011
1012 static void intel_i965_g33_setup_chipset_flush(void)
1013 {
1014         u32 temp_hi, temp_lo;
1015         int ret;
1016
1017         pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
1018         pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);
1019
1020         if (!(temp_lo & 0x1)) {
1021
1022                 intel_alloc_chipset_flush_resource();
1023
1024                 intel_private.resource_valid = 1;
1025                 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
1026                         upper_32_bits(intel_private.ifp_resource.start));
1027                 pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
1028         } else {
1029                 u64 l64;
1030
1031                 temp_lo &= ~0x1;
1032                 l64 = ((u64)temp_hi << 32) | temp_lo;
1033
1034                 intel_private.resource_valid = 1;
1035                 intel_private.ifp_resource.start = l64;
1036                 intel_private.ifp_resource.end = l64 + PAGE_SIZE;
1037                 ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
1038                 /* some BIOSes reserve this area in a pnp some don't */
1039                 if (ret)
1040                         intel_private.resource_valid = 0;
1041         }
1042 }
1043
/* Set up the i9xx chipset flush page: find/allocate the bus address via
 * the chipset-specific helper, then map it uncached.  On any failure
 * i9xx_flush_page stays NULL and chipset flushing is simply disabled
 * (see intel_i915_chipset_flush). */
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	/* Flush-page setup is deliberately skipped on Sandybridge. */
	if (IS_SNB)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_I965 || IS_G33 || IS_G4X) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}
1070
/* Enable the GMCH, program the page-table base and scrub the non-stolen
 * part of the GTT (via the mapped GTT, not PTE MMIO as on i830).
 * Always returns 0. */
static int intel_i9xx_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	/* GMADDR is the graphics aperture base address. */
	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);

	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */

	/* Entries below gtt_entries map BIOS-stolen memory and are left
	 * alone; everything above gets the scratch page. */
	if (agp_bridge->driver->needs_scratch_page) {
		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
			writel(agp_bridge->scratch_page, intel_private.gtt+i);
		}
		readl(intel_private.gtt+i-1);	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i9xx_setup_flush();

	return 0;
}
1104
/* Tear down everything intel_i915_create_gatt_table and
 * intel_i9xx_setup_flush established: flush-page mapping, flush-page
 * resource, then the GTT and register mappings. */
static void intel_i915_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	/* Reset so a later intel_i9xx_setup_flush() starts from scratch. */
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);
}
1116
/* Flush the chipset write buffers by writing to the isoch flush page.
 * A NULL flush page (setup failed or skipped) makes this a no-op. */
static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
1122
/* Bind mem's pages into the GTT at pg_start; the actual PTE writes are
 * delegated to intel_agp_insert_sg_entries.  Returns 0 on success
 * (a zero-page insert is a success) or -EINVAL. */
static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	/* The low GTT entries map BIOS-stolen memory; refuse to touch them. */
	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	/* Range must fit inside the aperture. */
	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i915 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	intel_agp_insert_sg_entries(mem, pg_start, mask_type);

 out:
	ret = 0;
 out_err:
	/* NOTE(review): set even on error paths that never flushed —
	 * looks intentional (mirrors intel_i830_insert_entries), verify. */
	mem->is_flushed = true;
	return ret;
}
1174
/* Unbind mem's pages from the GTT starting at pg_start by pointing the
 * affected PTEs back at the scratch page.  Returns 0 or -EINVAL. */
static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	/* The low GTT entries map BIOS-stolen memory; refuse to touch them. */
	if (pg_start < intel_private.gtt_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++)
		writel(agp_bridge->scratch_page, intel_private.gtt+i);

	/* Read back the last PTE to flush the posted PCI writes. */
	readl(intel_private.gtt+i-1);

	return 0;
}
1196
1197 /* Return the aperture size by just checking the resource length.  The effect
1198  * described in the spec of the MSAC registers is just changing of the
1199  * resource size.
1200  */
1201 static int intel_i9xx_fetch_size(void)
1202 {
1203         int num_sizes = ARRAY_SIZE(intel_i830_sizes);
1204         int aper_size; /* size in megabytes */
1205         int i;
1206
1207         aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);
1208
1209         for (i = 0; i < num_sizes; i++) {
1210                 if (aper_size == intel_i830_sizes[i].size) {
1211                         agp_bridge->current_size = intel_i830_sizes + i;
1212                         return aper_size;
1213                 }
1214         }
1215
1216         return 0;
1217 }
1218
/* Return the size of the GTT (the page table itself) in bytes. */
static int intel_i915_get_gtt_size(void)
{
	int size;	/* in KB until the final KB() conversion */

	if (IS_G33) {
		u16 gmch_ctrl;

		/* G33's GTT size defined in gmch_ctrl */
		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			size = 512;
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			size = 1024;
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			size = 8*1024;
			break;
		default:
			/* Fall back to the smallest known size. */
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				(gmch_ctrl & I830_GMCH_GMS_MASK));
			size = 512;
		}
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size();
	}

	return KB(size);
}
1253
/* The intel i915 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for in the GTT.
 */
static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp, temp2;
	int gtt_map_size;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	/* The BIOS built the table for us; we never allocate one. */
	agp_bridge->gatt_table_real = NULL;

	/* MMADDR: MMIO base; PTEADDR: the GTT itself (a separate BAR). */
	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);

	gtt_map_size = intel_i915_get_gtt_size();

	intel_private.gtt = ioremap(temp2, gtt_map_size);
	if (!intel_private.gtt)
		return -ENOMEM;

	/* Total entry count: 4 bytes per PTE. */
	intel_private.gtt_total_size = gtt_map_size / 4;

	temp &= 0xfff80000;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers) {
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	/* Page-aligned base of the BIOS-programmed page table. */
	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
1301
/*
 * The i965 supports 36-bit physical addresses, but to keep
 * the format of the GTT the same, the bits that don't fit
 * in a 32-bit word are shifted down to bits 4..7.
 *
 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
 * is always zero on 32-bit architectures, so no need to make
 * this conditional.
 */
static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;

	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
1320
/* Report where the GTT sits inside the MMIO BAR (*gtt_offset, bytes from
 * the MMADDR base) and how large it is (*gtt_size, bytes), per chipset. */
static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
	u16 snb_gmch_ctl;

	switch (agp_bridge->dev->device) {
	/* G4x / Ironlake: GTT at 2MB offset, 2MB in size. */
	case PCI_DEVICE_ID_INTEL_GM45_HB:
	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
	case PCI_DEVICE_ID_INTEL_Q45_HB:
	case PCI_DEVICE_ID_INTEL_G45_HB:
	case PCI_DEVICE_ID_INTEL_G41_HB:
	case PCI_DEVICE_ID_INTEL_B43_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
		*gtt_offset = *gtt_size = MB(2);
		break;
	/* Sandybridge: GTT size is encoded in the GMCH control register. */
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
		*gtt_offset = MB(2);

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			*gtt_size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			*gtt_size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			*gtt_size = MB(2);
			break;
		}
		break;
	/* Older i965-class parts: 512KB GTT at 512KB offset. */
	default:
		*gtt_offset = *gtt_size = KB(512);
	}
}
1361
/* The intel i965 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for in the GTT.
 */
static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;
	int gtt_offset, gtt_size;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	/* The BIOS built the table for us; we never allocate one. */
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);

	temp &= 0xfff00000;

	/* On i965 the GTT lives inside the MMIO BAR at a per-chipset offset
	 * (unlike i915, which has a dedicated PTEADDR BAR). */
	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);

	intel_private.gtt = ioremap((temp + gtt_offset) , gtt_size);

	if (!intel_private.gtt)
		return -ENOMEM;

	/* Total entry count: 4 bytes per PTE. */
	intel_private.gtt_total_size = gtt_size / 4;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers) {
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	/* Page-aligned base of the BIOS-programmed page table. */
	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
1409
/* Bridge ops for the i810 family: the only variant here that allocates
 * its own GATT via the generic agp helpers instead of reusing the
 * BIOS-initialized table. */
static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};
1435
/* Bridge ops for the i830 family: BIOS-initialized GATT, PTEs written
 * through MMIO, memset+clflush-style chipset flush. */
static const struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i830_configure,
	.fetch_size		= intel_i830_fetch_size,
	.cleanup		= intel_i830_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i830_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i830_insert_entries,
	.remove_memory		= intel_i830_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i830_chipset_flush,
};
1462
/* Bridge ops for the i915 family: mapped GTT, isoch flush page, optional
 * PCI DMA API hooks when the IOMMU is configured. */
static const struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1495
/* Bridge ops for the i965 family: like i915 but with the 36-bit-aware
 * mask_memory and the i965 GATT discovery. */
static const struct agp_bridge_driver intel_i965_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
1528
/* Bridge ops for G33-class chipsets: i915 GATT layout with the i965
 * 36-bit-aware mask_memory. */
static const struct agp_bridge_driver intel_g33_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};