/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/msi.h>

#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#include <asm/debug.h>
#define define_pe_printk_level(func, kern_level)		\
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...) \
{								\
	struct va_format vaf;					\
	va_list args;						\
	char pfix[32];						\
	int r;							\
								\
	va_start(args, fmt);					\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
	if (pe->pdev)						\
		strlcpy(pfix, dev_name(&pe->pdev->dev),		\
			sizeof(pfix));				\
	else							\
		sprintf(pfix, "%04x:%02x ",			\
			pci_domain_nr(pe->pbus),		\
			pe->pbus->number);			\
	r = printk(kern_level "pci %s: [PE# %.3d] %pV",		\
		   pfix, pe->pe_number, &vaf);			\
	va_end(args);						\
	return r;						\
}
define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);
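
/*
 * These behave like printk() with a PE-identifying prefix. A call such
 * as (illustrative values only):
 *
 *	pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n", 10, 3);
 *
 * prints roughly "pci 0000:02 : [PE# 005] DMA weight 10, assigned 3
 * DMA32 segments" at KERN_INFO level.
 */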
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}
static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}
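
/*
 * Minimal usage sketch (mirrors pnv_ioda_setup_dev_PE() below; error
 * handling elided):
 *
 *	int pe_num = pnv_ioda_alloc_pe(phb);
 *
 *	if (pe_num == IODA_INVALID_PE)
 *		return NULL;
 *	if (pnv_ioda_configure_pe(phb, &phb->ioda.pe_array[pe_num]))
 *		pnv_ioda_free_pe(phb, pe_num);
 */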
/* Currently these two are only used when MSIs are enabled; this will
 * change, but in the meantime we need to protect them to avoid warnings.
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
	if (pe->pbus) {
		int count;

		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		else
			count = 1;

		switch (count) {
		case  1: bcomp = OpalPciBusAll;		break;
		case  2: bcomp = OpalPciBus7Bits;	break;
		case  4: bcomp = OpalPciBus6Bits;	break;
		case  8: bcomp = OpalPciBus5Bits;	break;
		case 16: bcomp = OpalPciBus4Bits;	break;
		case 32: bcomp = OpalPciBus3Bits;	break;
		default:
			pr_err("%s: Number of subordinate buses %d"
			       " unsupported\n",
			       pci_name(pe->pbus->self), count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		}
		rid_end = pe->rid + (count << 8);
	} else {
		parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	}

	/* Associate PE in PELT */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
	if (rc) {
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
		return -ENXIO;
	}
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Add to all parents' PELT-V */
	while (parent) {
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
			/* XXX What to do in case of error ? */
		}
		parent = parent->bus->self;
	}

	/* Set up reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Set up one MVE on IODA1 */
	if (phb->type == PNV_PHB_IODA1) {
		pe->mve_number = pe->pe_number;
		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
				      pe->pe_number);
		if (rc) {
			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
			       rc, pe->mve_number);
			pe->mve_number = -1;
		} else {
			rc = opal_pci_set_mve_enable(phb->opal_id,
						     pe->mve_number, OPAL_ENABLE_MVE);
			if (rc) {
				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
				       rc, pe->mve_number);
				pe->mve_number = -1;
			}
		}
	} else if (phb->type == PNV_PHB_IODA2)
		pe->mve_number = 0;

	return 0;
}
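
/*
 * For reference, with illustrative numbers: the RID is the config-space
 * address "bus << 8 | devfn". A bus-type PE spanning 16 subordinate
 * buses starting at bus 0x40 uses bcomp = OpalPciBus4Bits, i.e. only
 * the top 4 bits of the bus number are compared, so RIDs 0x4000..0x4fff
 * (rid_end = rid + (16 << 8)) all resolve to that PE.
 */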
static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}
static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
{
	/* This is quite simplistic. The "base" weight of a device
	 * is 10; 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return 0;

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		return 3;

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
		return 15;

	/* Default */
	return 10;
}
#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;
	int pe_num;

	if (!pdn) {
		pr_err("%s: Device tree node not associated properly\n",
		       pci_name(dev));
		return NULL;
	}
	if (pdn->pe_number != IODA_INVALID_PE)
		return NULL;

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)
		pe_num = 0;
	else
		pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",
			   pci_name(dev));
		return NULL;
	}

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure; both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyway.
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pci_dev_get(dev);
	pdn->pcidev = dev;
	pdn->pe_number = pe_num;
	pe->pdev = dev;
	pe->pbus = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;
		pe->pdev = NULL;
		pci_dev_put(dev);
		return NULL;
	}

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);

	return pe;
}
#endif /* Useful for SRIOV case */
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

		if (pdn == NULL) {
			pr_warn("%s: No device node associated with device!\n",
				pci_name(dev));
			continue;
		}
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
	}
}
/*
 * There are two types of PCI-bus-sensitive PEs: one comprises a single
 * PCI bus; the other contains a primary PCI bus plus its subordinate
 * PCI devices and buses. The second type is normally created for a
 * PCIe-to-PCI bridge or for PLX switch downstream ports.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;
	int pe_num;

	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);
		return;
	}

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->pbus = bus;
	pe->pdev = NULL;
	pe->tce32_seg = -1;
	pe->mve_number = -1;
	pe->rid = bus->busn_res.start << 8;
	pe->dma_weight = 0;

	if (all)
		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
	else
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		if (pe_num)
			pnv_ioda_free_pe(phb, pe_num);
		pe->pbus = NULL;
		return;
	}

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Add the PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA-capable device exists
	 * below the bridge.
	 */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;
	}

	/* Link the PE */
	pnv_ioda_link_pe_by_weight(phb, pe);
}
static void pnv_ioda_setup_PEs(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
		}
	}
}
/*
 * Configure PEs so that the downstream PCI buses and devices
 * have their associated PE#. Unfortunately, we haven't yet
 * figured out how to reliably identify PLX bridges, so for now
 * we simply put the PCI bus and the subordinates behind the
 * root port into one PE. This is expected to change once we
 * can detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_PEs(hose->bus);
	}
}
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called while the PE# hasn't been
	 * assigned yet. Do nothing in that case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		set_iommu_table_base(&dev->dev, &pe->tce32_table);
		if (dev->subordinate)
			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
	}
}
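
/*
 * Note that every device on the bus, and recursively below it, is given
 * the PE's single shared 32-bit TCE table as its IOMMU table, so DMA
 * mappings are per-PE rather than per-device.
 */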
static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
{
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);
	end = __pa(endp);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
	if (tbl->it_busno) {
		start <<= 12;
		end <<= 12;
		inc = 128 << 12;
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		end |= (1ull << 63);
		inc = 16;
	} else {
		/* Default (older HW) */
		inc = 128;
	}

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		__raw_writeq(start, invalidate);
		start += inc;
	}

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
}
static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
{
	unsigned long start, end, inc;
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;

	/* We'll invalidate DMA address in PE scope */
	start = 0x2ul << 60;
	start |= (pe->pe_number & 0xFF);
	end = start;

	/* Figure out the start, end and step */
	inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
	start |= (inc << 12);
	inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
	end |= (inc << 12);
	inc = (0x1ul << 12);
	mb();

	while (start <= end) {
		__raw_writeq(start, invalidate);
		start += inc;
	}
}
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
				 u64 *startp, u64 *endp)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1)
		pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
	else
		pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
}
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
				      struct pnv_ioda_pe *pe, unsigned int base,
				      unsigned int segs)
{
	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int i;
	int64_t rc;
	void *addr;

	/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE	((0x10000000 / 0x1000) * 8)
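
	/*
	 * Quick check on the arithmetic: 0x10000000 / 0x1000 = 64K TCE
	 * entries per 256M segment, 8 bytes each, i.e. a 512KB table
	 * per segment.
	 */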
	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie. per segment) but that's an optimization for later; it
	 * requires some added smarts with our get/put_tce implementation.
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
	if (!tce_mem) {
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	/* Configure HW */
	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						pe->pe_number,
						base + i, 1,
						__pa(addr) + TCE32_TABLE_SIZE * i,
						TCE32_TABLE_SIZE, 0x1000);
		if (rc) {
			pe_err(pe, " Failed to configure 32-bit TCE table,"
			       " err %ld\n", rc);
			goto fail;
		}
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
				  base << 28);

	/* OPAL variant of P7IOC SW invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_busno = 0;
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
			       TCE_PCI_SWINV_PAIR;
	}
	iommu_init_table(tbl, phb->hose->node);
	iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);

	if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
 fail:
	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct page *tce_mem = NULL;
	void *addr;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int tce_table_size, end;
	int64_t rc;

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))
		return;

	/* The PE will reserve all possible 32-bit space */
	pe->tce32_seg = 0;
	end = (1 << ilog2(phb->ioda.m32_pci_base));
	tce_table_size = (end / 0x1000) * 8;
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
		end);

	/* Allocate TCE table */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce_table_size));
	if (!tce_mem) {
		pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
		goto fail;
	}
	addr = page_address(tce_mem);
	memset(addr, 0, tce_table_size);

	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for 32-bit DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
					pe->pe_number << 1, 1, __pa(addr),
					tce_table_size, 0x1000);
	if (rc) {
		pe_err(pe, "Failed to configure 32-bit TCE table,"
		       " err %ld\n", rc);
		goto fail;
	}

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0);

	/* OPAL variant of PHB3 invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
	if (swinvp) {
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_busno = 0;
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
	}
	iommu_init_table(tbl, phb->hose->node);

	if (pe->pdev)
		set_iommu_table_base(&pe->pdev->dev, tbl);
	else
		pnv_ioda_setup_bus_dma(pe, pe->pbus);

	return;
fail:
	if (pe->tce32_seg >= 0)
		pe->tce32_seg = -1;
	if (tce_mem)
		__free_pages(tce_mem, get_order(tce_table_size));
}
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE# than segments available, hand out one
	 * per PE until we run out and let the rest fail. If not,
	 * then we assign at least one segment per PE, plus more based
	 * on the amount of devices under that PE.
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments, hand them
	 * out one base segment plus any residual segments based on
	 * weight.
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
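	/*
	 * Worked example with made-up numbers: given 16 TCE32 segments
	 * and two DMA-capable PEs of weights 10 and 15 (tw = 25),
	 * residual = 16 - 2 = 14. The weight-10 PE gets
	 * 1 + (10 * 14 + 12) / 25 = 7 segments and the weight-15 PE
	 * gets 1 + (15 * 14 + 12) / 25 = 9, using all 16.
	 */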
	base = 0;
	list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
		if (!pe->dma_weight)
			continue;
		if (!remaining) {
			pe_warn(pe, "No DMA32 resources available\n");
			continue;
		}
		segs = 1;
		if (residual) {
			segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
			if (segs > remaining)
				segs = remaining;
		}
		/* For IODA2-compliant PHB3 we don't care about the weight:
		 * all available 32-bit DMA space is assigned to the PE.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
				pe->dma_weight, segs);
			pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
		} else {
			pe_info(pe, "Assign DMA32 space\n");
			segs = 0;
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
		}
		remaining -= segs;
		base += segs;
	}
}
#ifdef CONFIG_PCI_MSI
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,
					   ioda.irq_chip);
	int64_t rc;

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
	WARN_ON_ONCE(rc);

	icp_native_eoi(d);
}
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
{
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct irq_data *idata;
	struct irq_chip *ichip;
	unsigned int xive_num = hwirq - phb->msi_base;
	uint64_t addr64;
	uint32_t addr32, data;
	int rc;

	/* No PE assigned ? bail out ... no MSI for you ! */
	if (pe == NULL)
		return -ENXIO;

	/* Check if we have an MVE */
	if (pe->mve_number < 0)
		return -ENXIO;

	/* Force 32-bit MSI on some broken devices */
	if (pdn && pdn->force_32bit_msi)
		is_64 = 0;

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
	if (rc) {
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);
		return -EIO;
	}

	if (is_64) {
		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr64, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = addr64 >> 32;
		msg->address_lo = addr64 & 0xfffffffful;
	} else {
		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
				     &addr32, &data);
		if (rc) {
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
				pci_name(dev), rc);
			return -EIO;
		}
		msg->address_hi = 0;
		msg->address_lo = addr32;
	}
	msg->data = data;

	/*
	 * Change the IRQ chip for the MSI interrupts on PHB3.
	 * The corresponding IRQ chip should be populated for
	 * the first MSI that we're going to set up.
	 */
	if (phb->type == PNV_PHB_IODA2) {
		if (!phb->ioda.irq_chip_init) {
			idata = irq_get_irq_data(virq);
			ichip = irq_data_get_irq_chip(idata);
			phb->ioda.irq_chip_init = 1;
			phb->ioda.irq_chip = *ichip;
			phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
		}
		irq_set_chip(virq, &phb->ioda.irq_chip);
	}

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);

	return 0;
}
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
	unsigned int count;
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
	if (!prop) {
		/* BML Fallback */
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
	}
	if (!prop)
		return;

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);
		return;
	}

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info("  Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
}
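
/*
 * The property parsed above is a (base, count) pair of cells. A
 * hypothetical device tree node could carry, e.g.:
 *
 *	ibm,opal-msi-ranges = <0x200 0x100>;
 *
 * meaning 256 MSIs are available, starting at hardware IRQ 0x200.
 */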
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */
/*
 * This function is supposed to be called on a per-PE basis, from top
 * (root) to bottom, so the I/O or MMIO segments assigned to a parent
 * PE can be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
				  struct pnv_ioda_pe *pe)
{
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus_region region;
	struct resource *res;
	int i, index;
	int rc;

	/*
	 * NOTE: We only care about PCI-bus-based PEs for now. PCI-device-
	 * based PEs, for example SR-IOV-sensitive VFs, will be figured
	 * out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));

	pci_bus_for_each_resource(pe->pbus, res, i) {
		if (!res || !res->flags ||
		    res->start > res->end)
			continue;

		if (res->flags & IORESOURCE_IO) {
			region.start = res->start - phb->ioda.io_pci_base;
			region.end = res->end - phb->ioda.io_pci_base;
			index = region.start / phb->ioda.io_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.io_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping IO "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.io_segsize;
				index++;
			}
		} else if (res->flags & IORESOURCE_MEM) {
			/* WARNING: Assumes M32 is mem region 0 in PHB. We need
			 * to harden that algorithm when we start supporting M64.
			 */
			region.start = res->start -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			region.end = res->end -
				     hose->mem_offset[0] -
				     phb->ioda.m32_pci_base;
			index = region.start / phb->ioda.m32_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.m32_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping M32 "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);
					break;
				}

				region.start += phb->ioda.m32_segsize;
				index++;
			}
		}
	}
}
static void pnv_pci_ioda_setup_seg(void)
{
	struct pci_controller *tmp, *hose;
	struct pnv_phb *phb;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			pnv_ioda_setup_pe_seg(hose, pe);
		}
	}
}
static void pnv_pci_ioda_setup_DMA(void)
{
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_dma(hose->private_data);

		/* Mark the PHB initialization done */
		phb = hose->private_data;
		phb->initialized = 1;
	}
}
static void pnv_pci_ioda_create_dbgfs(void)
{
#ifdef CONFIG_DEBUG_FS
	struct pci_controller *hose, *tmp;
	struct pnv_phb *phb;
	char name[16];

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;

		sprintf(name, "PCI%04x", hose->global_number);
		phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
		if (!phb->dbgfs)
			pr_warning("%s: Error on creating debugfs on PHB#%x\n",
				   __func__, hose->global_number);
	}
#endif /* CONFIG_DEBUG_FS */
}
static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_seg();
	pnv_pci_ioda_setup_DMA();

	pnv_pci_ioda_create_dbgfs();

#ifdef CONFIG_EEH
	eeh_probe_mode_set(EEH_PROBE_MODE_DEV);
	eeh_addr_cache_build();
	eeh_init();
#endif
}
/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return the I/O or M32 segment size for PE-sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) are returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. In that case, we
 * don't need to enlarge the alignment, which saves some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

	bridge = bus->self;
	while (bridge) {
		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			num_pci_bridges++;
			if (num_pci_bridges >= 2)
				return 1;
		}

		bridge = bridge->bus->self;
	}

	/* We will need to support prefetchable memory windows later */
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
}
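
/*
 * Example with illustrative numbers: on a PHB whose M32 segment size is
 * 128MB, the MMIO window of a PE-sensitive P2P bridge is aligned to
 * 128MB instead of the default 1MB, so the window covers whole segments
 * and the bus behind the bridge can form its own PE.
 */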
/* Prevent enabling devices for which we couldn't properly
 * assign a PE.
 */
static int pnv_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn;

	/* The function is probably called while the PEs have
	 * not been created yet, e.g. during resource reassignment
	 * in the PCI probe period. Just skip the check then.
	 */
	if (!phb->initialized)
		return 0;

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return -EINVAL;

	return 0;
}
static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
			       u32 devfn)
{
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
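
/*
 * E.g. a device at bus 0x04, devfn 0x10 has RID 0x0410, so its PE# is
 * simply phb->ioda.pe_rmap[0x0410], as populated by
 * pnv_ioda_configure_pe() above.
 */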
static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
{
	opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
		       OPAL_ASSERT_RESET);
}
void __init pnv_pci_init_ioda_phb(struct device_node *np,
				  u64 hub_id, int ioda_type)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, iomap_off, pemap_off;
	const u64 *prop64;
	const u32 *prop32;
	u64 phb_id;
	void *aux;
	long rc;
	int len;

	pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
	if (!prop64) {
		pr_err("  Missing \"ibm,opal-phbid\" property !\n");
		return;
	}
	phb_id = be64_to_cpup(prop64);
	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	if (!phb) {
		pr_err("  Out of memory !\n");
		return;
	}

	/* Allocate PCI controller */
	memset(phb, 0, sizeof(struct pnv_phb));
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb->hose) {
		pr_err("  Can't allocate PCI controller for %s\n",
		       np->full_name);
		free_bootmem((unsigned long)phb, sizeof(struct pnv_phb));
		return;
	}
	spin_lock_init(&phb->lock);
	prop32 = of_get_property(np, "bus-range", &len);
	if (prop32 && len == 8) {
		hose->first_busno = prop32[0];
		hose->last_busno = prop32[1];
	} else {
		pr_warn("  Broken <bus-range> on %s\n", np->full_name);
		hose->first_busno = 0;
		hose->last_busno = 0xff;
	}
	hose->private_data = phb;
	phb->hub_id = hub_id;
	phb->opal_id = phb_id;
	phb->type = ioda_type;

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;
	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);

	/* Get registers */
	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err("  Failed to map registers !\n");
	/* Initialize more IODA stuff */
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
	if (!prop32)
		phb->ioda.total_pe = 1;
	else
		phb->ioda.total_pe = *prop32;

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already chopped off the top 64K of M32 space (MSI space);
	 * add it back here for segment-size computation purposes.
	 */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	m32map_off = size;
	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
	if (phb->type == PNV_PHB_IODA1) {
		iomap_off = size;
		size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
	}
	pemap_off = size;
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = alloc_bootmem(size);
	memset(aux, 0, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	if (phb->type == PNV_PHB_IODA1)
		phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(0, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
	INIT_LIST_HEAD(&phb->ioda.pe_list);
	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
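	/*
	 * E.g. with a 2GB M32 PCI base: 0x80000000 >> 28 = 8 segments
	 * of 256MB of 32-bit DMA space each.
	 */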
	/* Clear unusable m64 */
	hose->mem_resources[1].flags = 0;
	hose->mem_resources[1].start = 0;
	hose->mem_resources[1].end = 0;
	hose->mem_resources[2].flags = 0;
	hose->mem_resources[2].start = 0;
	hose->mem_resources[2].end = 0;
#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 window_type,
					 window_num,
					 starting_real_address,
					 starting_pci_address,
					 segment_size);
#endif

	pr_info("  %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n",
		phb->ioda.total_pe,
		phb->ioda.m32_size, phb->ioda.m32_segsize,
		phb->ioda.io_size, phb->ioda.io_segsize);
	phb->hose->ops = &pnv_pci_ops;
#ifdef CONFIG_EEH
	phb->eeh_ops = &ioda_eeh_ops;
#endif /* CONFIG_EEH */

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	/* Setup TCEs */
	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;

	/* Setup shutdown function for kexec */
	phb->shutdown = pnv_pci_ioda_shutdown;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);
	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * so that the PCI core does the resource assignment. The
	 * expectation is that the core will apply correct I/O and
	 * MMIO alignment to the P2P bridge BARs, so that each PCI
	 * bus (excluding the child P2P bridges) can form an
	 * individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
	ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
	pci_add_flags(PCI_REASSIGN_ALL_RSRC);
	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
	if (rc)
		pr_warning("  OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * On IODA1 map everything to PE#0; on IODA2 we assume the IODA
	 * reset has cleared the RTT, which has the same effect.
	 */
	if (ioda_type == PNV_PHB_IODA1)
		opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
}
void __init pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}
void __init pnv_pci_init_ioda_hub(struct device_node *np)
{
	struct device_node *phbn;
	const u64 *prop64;
	u64 hub_id;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Walk the child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, hub_id, PNV_PHB_IODA1);
	}
}