/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 - 1997, 2000-2005 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <asm/sn/types.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/io.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/simulator.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/tioca_provider.h>
#include <asm/sn/tioce_provider.h>
#include "xtalk/hubdev.h"
#include "xtalk/xwidgetdev.h"

static struct list_head sn_sysdata_list;

/* sysdata list struct */
struct sysdata_el {
        struct list_head entry;
        void *sysdata;
};

struct slab_info {
        struct hubdev_info hubdev;
};

struct brick {
        moduleid_t id;          /* Module ID of this module        */
        struct slab_info slab_info[MAX_SLABS + 1];
};

int sn_ioif_inited = 0;         /* SN I/O infrastructure initialized? */

struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];       /* indexed by asic type */

static int max_segment_number = 0; /* Default highest segment number */
static int max_pcibus_number = 255; /* Default highest pci bus number */

/*
 * Hooks and struct for unsupported pci providers
 */

static dma_addr_t
sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
{
        return 0;
}

static void
sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
{
        return;
}

static void *
sn_default_pci_bus_fixup(struct pcibus_bussoft *soft, struct pci_controller *controller)
{
        return NULL;
}

static struct sn_pcibus_provider sn_pci_default_provider = {
        .dma_map = sn_default_pci_map,
        .dma_map_consistent = sn_default_pci_map,
        .dma_unmap = sn_default_pci_unmap,
        .bus_fixup = sn_default_pci_bus_fixup,
};

/*
 * Retrieve the DMA Flush List given nasid, widget, and device.
 * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
 */
static inline u64
sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
                             u64 address)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff,
                        (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
                        (u64) nasid, (u64) widget_num,
                        (u64) device_num, (u64) address, 0, 0, 0);
        return ret_stuff.status;
}

/*
 * Retrieve the hub device info structure for the given nasid.
 */
static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff,
                        (u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
                        (u64) handle, (u64) address, 0, 0, 0, 0, 0);
        return ret_stuff.v0;
}

/*
 * Retrieve the pci bus information given the bus number.
 */
static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff,
                        (u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
                        (u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
        return ret_stuff.v0;
}

/*
 * Retrieve the pci device information given the bus and device|function number.
 */
static inline u64
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
                    u64 sn_irq_info)
{
        struct ia64_sal_retval ret_stuff;
        ret_stuff.status = 0;
        ret_stuff.v0 = 0;

        SAL_CALL_NOLOCK(ret_stuff,
                        (u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
                        (u64) segment, (u64) bus_number, (u64) devfn,
                        (u64) pci_dev,
                        sn_irq_info, 0, 0);
        return ret_stuff.v0;
}

/*
 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
 *                        device.
 */
inline struct pcidev_info *
sn_pcidev_info_get(struct pci_dev *dev)
{
        struct pcidev_info *pcidev;

        list_for_each_entry(pcidev,
                            &(SN_PCI_CONTROLLER(dev)->pcidev_info), pdi_list) {
                if (pcidev->pdi_linux_pcidev == dev) {
                        return pcidev;
                }
        }
        return NULL;
}

/* Older PROM flush WAR
 *
 * 01/16/06 -- This war will be in place until a new official PROM is released.
 * Additionally note that the struct sn_flush_device_war also has to be
 * removed from arch/ia64/sn/include/xtalk/hubdev.h
 */
static u8 war_implemented = 0;

static void sn_device_fixup_war(u64 nasid, u64 widget, int device,
                                struct sn_flush_device_common *common)
{
        struct sn_flush_device_war *war_list;
        struct sn_flush_device_war *dev_entry;
        struct ia64_sal_retval isrv = {0,0,0,0};

        if (!war_implemented) {
                printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
                       "PROM flush WAR\n");
                war_implemented = 1;
        }

        war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
        if (!war_list)
                BUG();

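        /* Fetch the DMA flush list for the whole widget, then copy out the
         * single entry for the requested device. */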
        SAL_CALL_NOLOCK(isrv, SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
                        nasid, widget, __pa(war_list), 0, 0, 0, 0);
        if (isrv.status)
                panic("sn_device_fixup_war failed: %s\n",
                      ia64_sal_strerror(isrv.status));

        dev_entry = war_list + device;
        memcpy(common, dev_entry, sizeof(*common));

        kfree(war_list);
}

/*
 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
 *      each node in the system.
 */
static void sn_fixup_ionodes(void)
{
        struct sn_flush_device_kernel *sn_flush_device_kernel;
        struct sn_flush_device_kernel *dev_entry;
        struct hubdev_info *hubdev;
        u64 status;
        u64 nasid;
        int i, widget, device;

        /*
         * Get SGI Specific HUB chipset information.
         * Inform Prom that this kernel can support domain bus numbering.
         */
        for (i = 0; i < num_cnodes; i++) {
                hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
                nasid = cnodeid_to_nasid(i);
                hubdev->max_segment_number = 0xffffffff;
                hubdev->max_pcibus_number = 0xff;
                status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
                if (status)
                        continue;

                /* Save the largest Domain and pcibus numbers found. */
                if (hubdev->max_segment_number) {
                        /*
                         * Dealing with a Prom that supports segments.
                         */
                        max_segment_number = hubdev->max_segment_number;
                        max_pcibus_number = hubdev->max_pcibus_number;
                }

                /* Attach the error interrupt handlers */
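                /* (Odd nasids are TIO devices and get the ICE error handler;
                 * hubs get the hub error handler.) */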
                if (nasid & 1)
                        ice_error_init(hubdev);
                else
                        hub_error_init(hubdev);

                for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
                        hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;

                if (!hubdev->hdi_flush_nasid_list.widget_p)
                        continue;

                hubdev->hdi_flush_nasid_list.widget_p =
                    kzalloc((HUB_WIDGET_ID_MAX + 1) *
                            sizeof(struct sn_flush_device_kernel *),
                            GFP_KERNEL);
                if (!hubdev->hdi_flush_nasid_list.widget_p)
                        BUG();

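                /* Build a per-device DMA flush list for each widget on this
                 * hub. */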
                for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
                        sn_flush_device_kernel = kzalloc(DEV_PER_WIDGET *
                                        sizeof(struct sn_flush_device_kernel),
                                        GFP_KERNEL);
                        if (!sn_flush_device_kernel)
                                BUG();

                        dev_entry = sn_flush_device_kernel;
                        for (device = 0; device < DEV_PER_WIDGET;
                             device++, dev_entry++) {
                                dev_entry->common = kzalloc(sizeof(struct
                                                sn_flush_device_common),
                                                GFP_KERNEL);
                                if (!dev_entry->common)
                                        BUG();

                                status = sal_get_device_dmaflush_list(nasid,
                                                                      widget,
                                                                      device,
                                                      (u64)(dev_entry->common));
                                if (status) {
                                        if (sn_sal_rev() < 0x0450) {
                                                /* shortlived WAR for older
                                                 * PROM images
                                                 */
                                                sn_device_fixup_war(nasid,
                                                                    widget,
                                                                    device,
                                                             dev_entry->common);
                                        } else
                                                BUG();
                                }

                                spin_lock_init(&dev_entry->sfdl_flush_lock);
                        }

                        if (sn_flush_device_kernel)
                                hubdev->hdi_flush_nasid_list.widget_p[widget] =
                                                       sn_flush_device_kernel;
                }
        }
}

/*
 * sn_pci_window_fixup() - Create a pci_window for each device resource.
 *                         Until ACPI support is added, we need this code
 *                         to set up pci_windows for use by
 *                         pcibios_bus_to_resource(),
 *                         pcibios_resource_to_bus(), etc.
 */
static void
sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
                    s64 * pci_addrs)
{
        struct pci_controller *controller = PCI_CONTROLLER(dev->bus);
        unsigned int i;
        unsigned int idx;
        unsigned int new_count;
        struct pci_window *new_window;

        if (count == 0)
                return;
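
        /* Grow the controller's window array to hold the existing windows
         * plus one new window per valid device resource. */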
        idx = controller->windows;
        new_count = controller->windows + count;
        new_window = kcalloc(new_count, sizeof(struct pci_window), GFP_KERNEL);
        if (new_window == NULL)
                BUG();
        if (controller->window) {
                memcpy(new_window, controller->window,
                       sizeof(struct pci_window) * controller->windows);
                kfree(controller->window);
        }

        /* Setup a pci_window for each device resource. */
        for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
                if (pci_addrs[i] == -1)
                        continue;

                new_window[idx].offset = dev->resource[i].start - pci_addrs[i];
                new_window[idx].resource = dev->resource[i];
                idx++;
        }

        controller->windows = new_count;
        controller->window = new_window;
}

void sn_pci_unfixup_slot(struct pci_dev *dev)
{
        struct pci_dev *host_pci_dev = SN_PCIDEV_INFO(dev)->host_pci_dev;

        sn_irq_unfixup(dev);
        pci_dev_put(host_pci_dev);
        pci_dev_put(dev);
}

/*
 * sn_pci_fixup_slot() - This routine sets up a slot's resources
 * consistent with the Linux PCI abstraction layer.  Resources acquired
 * from our PCI provider include PIO maps to BAR space and interrupt
 * objects.
 */
void sn_pci_fixup_slot(struct pci_dev *dev)
{
        unsigned int count = 0;
        int idx;
        int segment = pci_domain_nr(dev->bus);
        int status = 0;
        struct pcibus_bussoft *bs;
        struct pci_bus *host_pci_bus;
        struct pci_dev *host_pci_dev;
        struct pcidev_info *pcidev_info;
        s64 pci_addrs[PCI_ROM_RESOURCE + 1];
        struct sn_irq_info *sn_irq_info;
        unsigned long size;
        unsigned int bus_no, devfn;

        pci_dev_get(dev); /* for the sysdata pointer */
        pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
        if (!pcidev_info)
                BUG();          /* Cannot afford to run out of memory */

        sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
        if (!sn_irq_info)
                BUG();          /* Cannot afford to run out of memory */

        /* Call to retrieve pci device information needed by kernel. */
        status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
                                     dev->devfn,
                                     (u64) __pa(pcidev_info),
                                     (u64) __pa(sn_irq_info));
        if (status)
                BUG(); /* Cannot get platform pci device information */

        /* Add pcidev_info to list in sn_pci_controller struct */
        list_add_tail(&pcidev_info->pdi_list,
                      &(SN_PCI_CONTROLLER(dev->bus)->pcidev_info));

        /* Copy over PIO Mapped Addresses */
        for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
                unsigned long start, end, addr;

                if (!pcidev_info->pdi_pio_mapped_addr[idx]) {
                        pci_addrs[idx] = -1;
                        continue;
                }

                start = dev->resource[idx].start;
                end = dev->resource[idx].end;
                size = end - start;
                if (size == 0) {
                        pci_addrs[idx] = -1;
                        continue;
                }
                pci_addrs[idx] = start;
                count++;
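                /* Strip the region bits from the PROM's PIO-mapped address
                 * and remap it into the uncached address space. */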
                addr = pcidev_info->pdi_pio_mapped_addr[idx];
                addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
                dev->resource[idx].start = addr;
                dev->resource[idx].end = addr + size;
                if (dev->resource[idx].flags & IORESOURCE_IO)
                        dev->resource[idx].parent = &ioport_resource;
                else
                        dev->resource[idx].parent = &iomem_resource;
        }
        /* Create a pci_window in the pci_controller struct for
         * each device resource.
         */
        if (count > 0)
                sn_pci_window_fixup(dev, count, pci_addrs);

        /*
         * Using the PROM's values for the PCI host bus, get the Linux
         * PCI host_pci_dev struct and set up host bus linkages
         */

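        /* pdi_slot_host_handle encodes the host bus number in bits 32-39
         * and the devfn in the low 32 bits. */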
        bus_no = (pcidev_info->pdi_slot_host_handle >> 32) & 0xff;
        devfn = pcidev_info->pdi_slot_host_handle & 0xffffffff;
        host_pci_bus = pci_find_bus(segment, bus_no);
        host_pci_dev = pci_get_slot(host_pci_bus, devfn);

        pcidev_info->host_pci_dev = host_pci_dev;
        pcidev_info->pdi_linux_pcidev = dev;
        pcidev_info->pdi_host_pcidev_info = SN_PCIDEV_INFO(host_pci_dev);
        bs = SN_PCIBUS_BUSSOFT(dev->bus);
        pcidev_info->pdi_pcibus_info = bs;

        if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
                SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
        } else {
                SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
        }

        /* Only set up IRQ stuff if this device has a host bus context */
        if (bs && sn_irq_info->irq_irq) {
                pcidev_info->pdi_sn_irq_info = sn_irq_info;
                dev->irq = pcidev_info->pdi_sn_irq_info->irq_irq;
                sn_irq_fixup(dev, sn_irq_info);
        } else {
                pcidev_info->pdi_sn_irq_info = NULL;
                kfree(sn_irq_info);
        }
}


/*
 * sn_pci_controller_fixup() - This routine sets up a bus's resources
 * consistent with the Linux PCI abstraction layer.
 */
void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
{
        int status = 0;
        int nasid, cnode;
        struct pci_controller *controller;
        struct sn_pci_controller *sn_controller;
        struct pcibus_bussoft *prom_bussoft_ptr;
        struct hubdev_info *hubdev_info;
        void *provider_soft = NULL;
        struct sn_pcibus_provider *provider;

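        /* Ask the PROM for this bus's bussoft.  SAL returns a physical
         * pointer in prom_bussoft_ptr, converted with __va() below. */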
        status = sal_get_pcibus_info((u64) segment, (u64) busnum,
                                     (u64) ia64_tpa(&prom_bussoft_ptr));
        if (status > 0)
                return;         /* bus # does not exist */
        prom_bussoft_ptr = __va(prom_bussoft_ptr);

        /* Allocate a sn_pci_controller, which has a pci_controller struct
         * as the first member.
         */
        sn_controller = kzalloc(sizeof(struct sn_pci_controller), GFP_KERNEL);
        if (!sn_controller)
                BUG();
        INIT_LIST_HEAD(&sn_controller->pcidev_info);
        controller = &sn_controller->pci_controller;
        controller->segment = segment;

        if (bus == NULL) {
                bus = pci_scan_bus(busnum, &pci_root_ops, controller);
                if (bus == NULL)
                        goto error_return; /* error, or bus already scanned */
                bus->sysdata = NULL;
        }

        if (bus->sysdata)
                goto error_return; /* sysdata already alloc'd */

        /*
         * Per-provider fixup.  Copies the contents from prom to local
         * area and links SN_PCIBUS_BUSSOFT().
         */

        if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES)
                goto error_return; /* unsupported asic type */

        if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB)
                goto error_return; /* no further fixup necessary */

        provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
        if (provider == NULL)
                goto error_return; /* no provider registered for this asic */

        bus->sysdata = controller;
        if (provider->bus_fixup)
                provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr, controller);

        if (provider_soft == NULL) {
                /* fixup failed or not applicable */
                bus->sysdata = NULL;
                goto error_return;
        }

        /*
         * Setup pci_windows for legacy IO and MEM space.
         * (Temporary until ACPI support is in place.)
         */
        controller->window = kcalloc(2, sizeof(struct pci_window), GFP_KERNEL);
        if (controller->window == NULL)
                BUG();
        controller->window[0].offset = prom_bussoft_ptr->bs_legacy_io;
        controller->window[0].resource.name = "legacy_io";
        controller->window[0].resource.flags = IORESOURCE_IO;
        controller->window[0].resource.start = prom_bussoft_ptr->bs_legacy_io;
        controller->window[0].resource.end =
            controller->window[0].resource.start + 0xffff;
        controller->window[0].resource.parent = &ioport_resource;
        controller->window[1].offset = prom_bussoft_ptr->bs_legacy_mem;
        controller->window[1].resource.name = "legacy_mem";
        controller->window[1].resource.flags = IORESOURCE_MEM;
        controller->window[1].resource.start = prom_bussoft_ptr->bs_legacy_mem;
        controller->window[1].resource.end =
            controller->window[1].resource.start + (1024 * 1024) - 1;
        controller->window[1].resource.parent = &iomem_resource;
        controller->windows = 2;

        /*
         * Generic bus fixup goes here.  Don't reference prom_bussoft_ptr
         * after this point.
         */

        PCI_CONTROLLER(bus)->platform_data = provider_soft;
        nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
        cnode = nasid_to_cnodeid(nasid);
        hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
        SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
            &(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);

        /*
         * If the node information we obtained during the fixup phase is invalid
         * then set controller->node to -1 (undetermined)
         */
        if (controller->node >= num_online_nodes()) {
                struct pcibus_bussoft *b = SN_PCIBUS_BUSSOFT(bus);

                printk(KERN_WARNING "Device ASIC=%u XID=%u PBUSNUM=%u "
                                    "L_IO=%lx L_MEM=%lx BASE=%lx\n",
                        b->bs_asic_type, b->bs_xid, b->bs_persist_busnum,
                        b->bs_legacy_io, b->bs_legacy_mem, b->bs_base);
                printk(KERN_WARNING "on node %d but only %d nodes online. "
                        "Association set to undetermined.\n",
                        controller->node, num_online_nodes());
                controller->node = -1;
        }
        return;

error_return:

        kfree(sn_controller);
        return;
}

void sn_bus_store_sysdata(struct pci_dev *dev)
{
        struct sysdata_el *element;

        element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL);
        if (!element) {
                dev_dbg(&dev->dev, "%s: out of memory!\n", __FUNCTION__);
                return;
        }
        element->sysdata = SN_PCIDEV_INFO(dev);
        list_add(&element->entry, &sn_sysdata_list);
}

void sn_bus_free_sysdata(void)
{
        struct sysdata_el *element;
        struct list_head *list;

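        /* list_for_each() is not safe against deletion, so restart the walk
         * from the head after freeing each element. */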
sn_sysdata_free_start:
        list_for_each(list, &sn_sysdata_list) {
                element = list_entry(list, struct sysdata_el, entry);
                list_del(&element->entry);
                kfree(element->sysdata);
                kfree(element);
                goto sn_sysdata_free_start;
        }
        return;
}

/*
 * Ugly hack to get PCI setup until we have a proper ACPI namespace.
 */

#define PCI_BUSES_TO_SCAN 256

static int __init sn_pci_init(void)
{
        int i = 0;
        int j = 0;
        struct pci_dev *pci_dev = NULL;
        extern void sn_init_cpei_timer(void);
#ifdef CONFIG_PROC_FS
        extern void register_sn_procfs(void);
#endif

        if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
                return 0;

        /*
         * prime sn_pci_provider[].  Individual provider init routines will
         * override their respective default entries.
         */

        for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
                sn_pci_provider[i] = &sn_pci_default_provider;

        pcibr_init_provider();
        tioca_init_provider();
        tioce_init_provider();

        /*
         * This is needed to avoid bounce limit checks in the blk layer
         */
        ia64_max_iommu_merge_mask = ~PAGE_MASK;
        sn_fixup_ionodes();
        sn_irq_lh_init();
        INIT_LIST_HEAD(&sn_sysdata_list);
        sn_init_cpei_timer();

#ifdef CONFIG_PROC_FS
        register_sn_procfs();
#endif

        /* busses are not known yet ... */
        for (i = 0; i <= max_segment_number; i++)
                for (j = 0; j <= max_pcibus_number; j++)
                        sn_pci_controller_fixup(i, j, NULL);

        /*
         * Generic Linux PCI Layer has created the pci_bus and pci_dev
         * structures - time for us to add our SN platform-specific
         * information.
         */

        while ((pci_dev =
                pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL)
                sn_pci_fixup_slot(pci_dev);

        sn_ioif_inited = 1;     /* sn I/O infrastructure now initialized */

        return 0;
}

/*
 * hubdev_init_node() - Creates the HUB data structure and links it to its
 *      own NODE specific data area.
 */
void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
{
        struct hubdev_info *hubdev_info;

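        /* Nodes beyond num_online_nodes() are headless/memless, so their
         * hubdev_info is carved out of node 0's bootmem instead. */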
        if (node >= num_online_nodes()) /* Headless/memless IO nodes */
                hubdev_info = (struct hubdev_info *)alloc_bootmem_node(
                                NODE_DATA(0), sizeof(struct hubdev_info));
        else
                hubdev_info = (struct hubdev_info *)alloc_bootmem_node(
                                NODE_DATA(node), sizeof(struct hubdev_info));
        npda->pdinfo = (void *)hubdev_info;
}

geoid_t
cnodeid_get_geoid(cnodeid_t cnode)
{
        struct hubdev_info *hubdev;

        hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
        return hubdev->hdi_geoid;
}

subsys_initcall(sn_pci_init);
EXPORT_SYMBOL(sn_pci_fixup_slot);
EXPORT_SYMBOL(sn_pci_unfixup_slot);
EXPORT_SYMBOL(sn_pci_controller_fixup);
EXPORT_SYMBOL(sn_bus_store_sysdata);
EXPORT_SYMBOL(sn_bus_free_sysdata);