2 * probe.c - PCI detection and setup code
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
9 #include <linux/of_pci.h>
10 #include <linux/pci_hotplug.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <linux/cpumask.h>
14 #include <linux/pci-aspm.h>
15 #include <asm-generic/pci-bridge.h>
18 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
19 #define CARDBUS_RESERVE_BUSNR 3
/* Fallback bus-number aperture used for the default PCI domain.
 * NOTE(review): initializer lines (.name, .start, .end) are missing from
 * this excerpt; only .flags is visible. */
21 static struct resource busn_resource = {
25 .flags = IORESOURCE_BUS,
/* Global list of every PCI root bus in the system; exported for legacy
 * module users (the comment below records the intent to unexport it). */
28 /* Ugh. Need to stop exporting this to modules. */
29 LIST_HEAD(pci_root_buses);
30 EXPORT_SYMBOL(pci_root_buses);
/* Per-domain bus-number resources, keyed by domain number and chained on
 * pci_domain_busn_res_list.  NOTE(review): struct members after 'list'
 * (domain_nr, res) are missing from this excerpt. */
32 static LIST_HEAD(pci_domain_busn_res_list);
34 struct pci_domain_busn_res {
35 struct list_head list;
/* Look up the bus-number resource for @domain_nr, lazily allocating and
 * registering one on first use.
 * NOTE(review): the early-return on a list hit, the allocation-failure
 * check, and the final return are missing from this excerpt. */
40 static struct resource *get_pci_domain_busn_res(int domain_nr)
42 struct pci_domain_busn_res *r;
44 list_for_each_entry(r, &pci_domain_busn_res_list, list)
45 if (r->domain_nr == domain_nr)
48 r = kzalloc(sizeof(*r), GFP_KERNEL);
52 r->domain_nr = domain_nr;
55 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
57 list_add_tail(&r->list, &pci_domain_busn_res_list);
/* bus_find_device() match callback that accepts any device; used by
 * no_pci_devices() below to test whether any PCI device exists at all. */
62 static int find_anything(struct device *dev, void *data)
/* Returns non-zero when no device is registered on pci_bus_type, i.e.
 * PCI has not been initialized (see comment below).  The reference from
 * bus_find_device() is presumably dropped on a line missing from this
 * excerpt — TODO confirm. */
68 * Some device drivers need know if pci is initiated.
69 * Basically, we think pci is not initiated when there
70 * is no device to be found on the pci_bus_type.
72 int no_pci_devices(void)
77 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
78 no_devices = (dev == NULL);
82 EXPORT_SYMBOL(no_pci_devices);
/* Device-model release callback for a pci_bus: drop the bridge reference,
 * free the bus resource list and OF node.  The kfree of the bus itself is
 * presumably on a line missing from this excerpt — TODO confirm. */
87 static void release_pcibus_dev(struct device *dev)
89 struct pci_bus *pci_bus = to_pci_bus(dev);
91 put_device(pci_bus->bridge);
92 pci_bus_remove_resources(pci_bus);
93 pci_release_bus_of_node(pci_bus);
/* sysfs class for PCI buses; registered early (postcore) so buses can be
 * created before device_initcall-level drivers probe. */
97 static struct class pcibus_class = {
99 .dev_release = &release_pcibus_dev,
100 .dev_groups = pcibus_groups,
103 static int __init pcibus_class_init(void)
105 return class_register(&pcibus_class);
107 postcore_initcall(pcibus_class_init);
/* Compute a BAR's decode size from the value read back after writing all
 * 1s (@maxbase), masked by @mask.  Isolates the lowest set size bit and
 * returns (extent - 1); returns 0 (on a line missing from this excerpt)
 * for an unimplemented or broken BAR. */
109 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
111 u64 size = mask & maxbase; /* Find the significant bits */
115 /* Get the lowest of them to find the decode size, and
116 from that the extent. */
117 size = (size & ~(size-1)) - 1;
119 /* base == maxbase can be valid only if the BAR has
120 already been programmed with all 1s. */
121 if (base == maxbase && ((base | size) & mask) != mask)
/* Translate a raw BAR register value into IORESOURCE_* flags: I/O vs
 * memory space, prefetchable, and 32- vs 64-bit memory type.  Unknown
 * memory types fall through and are treated as 32-bit. */
127 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
132 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
133 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
134 flags |= IORESOURCE_IO;
138 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
139 flags |= IORESOURCE_MEM;
140 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
141 flags |= IORESOURCE_PREFETCH;
143 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
145 case PCI_BASE_ADDRESS_MEM_TYPE_32:
147 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
148 /* 1M mem BAR treated as 32-bit BAR */
150 case PCI_BASE_ADDRESS_MEM_TYPE_64:
151 flags |= IORESOURCE_MEM_64;
154 /* mem unknown type treated as 32-bit BAR */
160 #define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
163 * pci_read_base - read a PCI BAR
164 * @dev: the PCI device
165 * @type: type of the BAR
166 * @res: resource buffer to be filled in
167 * @pos: BAR position in the config space
169 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
171 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
172 struct resource *res, unsigned int pos)
175 u64 l64, sz64, mask64;
177 struct pci_bus_region region, inverted_region;
179 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
181 /* No printks while decoding is disabled! */
182 if (!dev->mmio_always_on) {
183 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
184 if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
185 pci_write_config_word(dev, PCI_COMMAND,
186 orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
190 res->name = pci_name(dev);
/* Classic BAR sizing: save the BAR, write all 1s, read back the size
 * mask, then restore the original value. */
192 pci_read_config_dword(dev, pos, &l);
193 pci_write_config_dword(dev, pos, l | mask);
194 pci_read_config_dword(dev, pos, &sz);
195 pci_write_config_dword(dev, pos, l);
198 * All bits set in sz means the device isn't working properly.
199 * If the BAR isn't implemented, all bits must be 0. If it's a
200 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
203 if (sz == 0xffffffff)
207 * I don't know how l can have all bits set. Copied from old code.
208 * Maybe it fixes a bug on some ancient platform.
213 if (type == pci_bar_unknown) {
214 res->flags = decode_bar(dev, l);
215 res->flags |= IORESOURCE_SIZEALIGN;
216 if (res->flags & IORESOURCE_IO) {
217 l64 = l & PCI_BASE_ADDRESS_IO_MASK;
218 sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
219 mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
221 l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
222 sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
223 mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
226 res->flags |= (l & IORESOURCE_ROM_ENABLE);
227 l64 = l & PCI_ROM_ADDRESS_MASK;
228 sz64 = sz & PCI_ROM_ADDRESS_MASK;
229 mask64 = (u32)PCI_ROM_ADDRESS_MASK;
/* 64-bit memory BAR: size the upper dword the same way and merge. */
232 if (res->flags & IORESOURCE_MEM_64) {
233 pci_read_config_dword(dev, pos + 4, &l);
234 pci_write_config_dword(dev, pos + 4, ~0);
235 pci_read_config_dword(dev, pos + 4, &sz);
236 pci_write_config_dword(dev, pos + 4, l);
238 l64 |= ((u64)l << 32);
239 sz64 |= ((u64)sz << 32);
240 mask64 |= ((u64)~0 << 32);
243 if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
244 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
249 sz64 = pci_size(l64, sz64, mask64);
251 dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
/* Mark BARs the CPU can't address (4GB+ size or location on a 32-bit
 * resource_size_t / pci_bus_addr_t kernel) as unset/disabled. */
256 if (res->flags & IORESOURCE_MEM_64) {
257 if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
258 && sz64 > 0x100000000ULL) {
259 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
262 dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
263 pos, (unsigned long long)sz64);
267 if ((sizeof(pci_bus_addr_t) < 8) && l) {
268 /* Above 32-bit boundary; try to reallocate */
269 res->flags |= IORESOURCE_UNSET;
272 dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
273 pos, (unsigned long long)l64);
279 region.end = l64 + sz64;
/* Fixed mis-encoded "&region" (appeared as "®ion" — '&reg' had been
 * collapsed into the ® entity). */
281 pcibios_bus_to_resource(dev->bus, res, &region);
282 pcibios_resource_to_bus(dev->bus, &inverted_region, res);
285 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
286 * the corresponding resource address (the physical address used by
287 * the CPU. Converting that resource address back to a bus address
288 * should yield the original BAR value:
290 * resource_to_bus(bus_to_resource(A)) == A
292 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
293 * be claimed by the device.
295 if (inverted_region.start != region.start) {
296 res->flags |= IORESOURCE_UNSET;
298 res->end = region.end - region.start;
299 dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
300 pos, (unsigned long long)region.start);
310 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
312 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
/* Probe @howmany standard BARs starting at PCI_BASE_ADDRESS_0, plus the
 * expansion ROM BAR at config offset @rom when non-zero.  A 64-bit BAR
 * consumes two slots, hence the extra pos increment from the return of
 * __pci_read_base(). */
315 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
317 unsigned int pos, reg;
319 for (pos = 0; pos < howmany; pos++) {
320 struct resource *res = &dev->resource[pos];
321 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
322 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
326 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
327 dev->rom_base_reg = rom;
328 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
329 IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
330 __pci_read_base(dev, pci_bar_mem32, res, rom);
/* Read the bridge's I/O window (base/limit registers, including the
 * optional upper 16 bits for 32-bit I/O decode) into child->resource[0].
 * Honors 1K granularity when the bridge advertises io_window_1k. */
334 static void pci_read_bridge_io(struct pci_bus *child)
336 struct pci_dev *dev = child->self;
337 u8 io_base_lo, io_limit_lo;
338 unsigned long io_mask, io_granularity, base, limit;
339 struct pci_bus_region region;
340 struct resource *res;
342 io_mask = PCI_IO_RANGE_MASK;
343 io_granularity = 0x1000;
344 if (dev->io_window_1k) {
345 /* Support 1K I/O space granularity */
346 io_mask = PCI_IO_1K_RANGE_MASK;
347 io_granularity = 0x400;
350 res = child->resource[0];
351 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
352 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
353 base = (io_base_lo & io_mask) << 8;
354 limit = (io_limit_lo & io_mask) << 8;
356 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
357 u16 io_base_hi, io_limit_hi;
359 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
360 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
361 base |= ((unsigned long) io_base_hi << 16);
362 limit |= ((unsigned long) io_limit_hi << 16);
366 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
368 region.end = limit + io_granularity - 1;
/* Fixed mis-encoded "&region" (was "®ion"). */
369 pcibios_bus_to_resource(dev->bus, res, &region);
370 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
/* Read the bridge's non-prefetchable memory window (1MB granularity)
 * into child->resource[1]. */
374 static void pci_read_bridge_mmio(struct pci_bus *child)
376 struct pci_dev *dev = child->self;
377 u16 mem_base_lo, mem_limit_lo;
378 unsigned long base, limit;
379 struct pci_bus_region region;
380 struct resource *res;
382 res = child->resource[1];
383 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
384 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
385 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
386 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
388 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
390 region.end = limit + 0xfffff;
/* Fixed mis-encoded "&region" (was "®ion"). */
391 pcibios_bus_to_resource(dev->bus, res, &region);
392 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
/* Read the bridge's prefetchable memory window into child->resource[2].
 * Handles the optional 64-bit upper base/limit registers, ignoring them
 * when base > limit (broken/uninitialized firmware), and rejects windows
 * a 32-bit pci_bus_addr_t cannot represent. */
396 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
398 struct pci_dev *dev = child->self;
399 u16 mem_base_lo, mem_limit_lo;
401 pci_bus_addr_t base, limit;
402 struct pci_bus_region region;
403 struct resource *res;
405 res = child->resource[2];
406 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
407 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
408 base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
409 limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
411 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
412 u32 mem_base_hi, mem_limit_hi;
414 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
415 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
418 * Some bridges set the base > limit by default, and some
419 * (broken) BIOSes do not initialize them. If we find
420 * this, just assume they are not being used.
422 if (mem_base_hi <= mem_limit_hi) {
423 base64 |= (u64) mem_base_hi << 32;
424 limit64 |= (u64) mem_limit_hi << 32;
428 base = (pci_bus_addr_t) base64;
429 limit = (pci_bus_addr_t) limit64;
431 if (base != base64) {
432 dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
433 (unsigned long long) base64);
438 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
439 IORESOURCE_MEM | IORESOURCE_PREFETCH;
440 if (res->flags & PCI_PREF_RANGE_TYPE_64)
441 res->flags |= IORESOURCE_MEM_64;
443 region.end = limit + 0xfffff;
/* Fixed mis-encoded "&region" (was "®ion"). */
444 pcibios_bus_to_resource(dev->bus, res, &region);
445 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
/* Populate a child bus's window resources from its bridge's config
 * space: point the child's resources at the bridge's bridge-window
 * slots, read the I/O, MMIO and prefetchable windows, and, for
 * subtractive-decode (transparent) bridges, additionally inherit the
 * parent bus's resources. */
449 void pci_read_bridge_bases(struct pci_bus *child)
451 struct pci_dev *dev = child->self;
452 struct resource *res;
455 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
458 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
460 dev->transparent ? " (subtractive decode)" : "");
462 pci_bus_remove_resources(child);
463 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
464 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
466 pci_read_bridge_io(child);
467 pci_read_bridge_mmio(child);
468 pci_read_bridge_mmio_pref(child);
470 if (dev->transparent) {
471 pci_bus_for_each_resource(child->parent, res, i) {
472 if (res && res->flags) {
473 pci_bus_add_resource(child, res,
474 PCI_SUBTRACTIVE_DECODE);
475 dev_printk(KERN_DEBUG, &dev->dev,
476 " bridge window %pR (subtractive decode)\n",
/* Allocate and minimally initialize a pci_bus: empty lists, unknown bus
 * speeds, and (with generic PCI domains) the parent's domain number.
 * NOTE(review): the NULL-check after kzalloc and the final return are
 * missing from this excerpt. */
483 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
487 b = kzalloc(sizeof(*b), GFP_KERNEL);
491 INIT_LIST_HEAD(&b->node);
492 INIT_LIST_HEAD(&b->children);
493 INIT_LIST_HEAD(&b->devices);
494 INIT_LIST_HEAD(&b->slots);
495 INIT_LIST_HEAD(&b->resources);
496 b->max_bus_speed = PCI_SPEED_UNKNOWN;
497 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
498 #ifdef CONFIG_PCI_DOMAINS_GENERIC
500 b->domain_nr = parent->domain_nr;
/* Release callback for a host bridge device: run the arch/driver
 * release_fn hook if set, then free the bridge's window list. */
505 static void pci_release_host_bridge_dev(struct device *dev)
507 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
509 if (bridge->release_fn)
510 bridge->release_fn(bridge);
512 pci_free_resource_list(&bridge->windows);
/* Allocate a host bridge for root bus @b.  NOTE(review): the NULL-check,
 * the bridge->bus assignment and the return are missing from this
 * excerpt. */
517 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
519 struct pci_host_bridge *bridge;
521 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
525 INIT_LIST_HEAD(&bridge->windows);
/* PCI-X secondary-status frequency field (4 bits) -> bus speed mapping;
 * indexed by (status & PCI_X_SSTATUS_FREQ) >> 6 in pci_set_bus_speed(). */
530 static const unsigned char pcix_bus_speed[] = {
531 PCI_SPEED_UNKNOWN, /* 0 */
532 PCI_SPEED_66MHz_PCIX, /* 1 */
533 PCI_SPEED_100MHz_PCIX, /* 2 */
534 PCI_SPEED_133MHz_PCIX, /* 3 */
535 PCI_SPEED_UNKNOWN, /* 4 */
536 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
537 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
538 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
539 PCI_SPEED_UNKNOWN, /* 8 */
540 PCI_SPEED_66MHz_PCIX_266, /* 9 */
541 PCI_SPEED_100MHz_PCIX_266, /* A */
542 PCI_SPEED_133MHz_PCIX_266, /* B */
543 PCI_SPEED_UNKNOWN, /* C */
544 PCI_SPEED_66MHz_PCIX_533, /* D */
545 PCI_SPEED_100MHz_PCIX_533, /* E */
546 PCI_SPEED_133MHz_PCIX_533 /* F */
/* PCIe Link Capabilities/Status speed field -> bus speed mapping;
 * indexed by the 4-bit current/supported link speed value. */
549 const unsigned char pcie_link_speed[] = {
550 PCI_SPEED_UNKNOWN, /* 0 */
551 PCIE_SPEED_2_5GT, /* 1 */
552 PCIE_SPEED_5_0GT, /* 2 */
553 PCIE_SPEED_8_0GT, /* 3 */
554 PCI_SPEED_UNKNOWN, /* 4 */
555 PCI_SPEED_UNKNOWN, /* 5 */
556 PCI_SPEED_UNKNOWN, /* 6 */
557 PCI_SPEED_UNKNOWN, /* 7 */
558 PCI_SPEED_UNKNOWN, /* 8 */
559 PCI_SPEED_UNKNOWN, /* 9 */
560 PCI_SPEED_UNKNOWN, /* A */
561 PCI_SPEED_UNKNOWN, /* B */
562 PCI_SPEED_UNKNOWN, /* C */
563 PCI_SPEED_UNKNOWN, /* D */
564 PCI_SPEED_UNKNOWN, /* E */
565 PCI_SPEED_UNKNOWN /* F */
/* Record the current link speed on @bus from a Link Status register
 * value, via the pcie_link_speed[] table above. */
568 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
570 bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
572 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
/* Map AGP status rate bits (and the AGP3 mode flag) to an agp_speeds[]
 * entry.  NOTE(review): the table contents, the index computation and
 * several rate branches are missing from this excerpt. */
574 static unsigned char agp_speeds[] = {
582 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
588 else if (agpstat & 2)
590 else if (agpstat & 1)
602 return agp_speeds[index];
/* Determine max and current bus speed for @bus from its bridge, trying
 * in order: AGP capability, PCI-X bridge secondary status, and PCIe
 * link capability/status registers. */
605 static void pci_set_bus_speed(struct pci_bus *bus)
607 struct pci_dev *bridge = bus->self;
610 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
612 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
616 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
617 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
619 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
620 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
623 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
626 enum pci_bus_speed max;
628 pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
631 if (status & PCI_X_SSTATUS_533MHZ) {
632 max = PCI_SPEED_133MHz_PCIX_533;
633 } else if (status & PCI_X_SSTATUS_266MHZ) {
634 max = PCI_SPEED_133MHz_PCIX_266;
635 } else if (status & PCI_X_SSTATUS_133MHZ) {
636 if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
637 max = PCI_SPEED_133MHz_PCIX_ECC;
639 max = PCI_SPEED_133MHz_PCIX;
641 max = PCI_SPEED_66MHz_PCIX;
644 bus->max_bus_speed = max;
645 bus->cur_bus_speed = pcix_bus_speed[
646 (status & PCI_X_SSTATUS_FREQ) >> 6];
651 if (pci_is_pcie(bridge)) {
655 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
656 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
658 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
659 pcie_update_link_speed(bus, linksta);
/* Resolve the MSI irq_domain for a root bus from firmware; currently
 * only the OF (device tree) path is visible in this excerpt. */
663 static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
665 struct irq_domain *d;
668 * Any firmware interface that can resolve the msi_domain
669 * should be called from here.
671 d = pci_host_bridge_of_msi_domain(bus);
/* Assign an MSI irq_domain to @bus: from firmware for a root bus,
 * otherwise inherited from the upstream bridge device. */
676 static void pci_set_bus_msi_domain(struct pci_bus *bus)
678 struct irq_domain *d;
681 * Either bus is the root, and we must obtain it from the
682 * firmware, or we inherit it from the bridge device.
684 if (pci_is_root_bus(bus))
685 d = pci_host_bridge_msi_domain(bus);
687 d = dev_get_msi_domain(&bus->self->dev);
689 dev_set_msi_domain(&bus->dev, d);
/* Allocate a child bus below @parent behind @bridge with bus number
 * @busnr: inherit ops/sysdata/flags from the parent, set up the device
 * model object, bus-number resource, speed, default window resource
 * pointers, MSI domain, then register the device and create legacy
 * sysfs files.  NOTE(review): error-path lines (alloc failure,
 * device_register failure cleanup) are missing from this excerpt. */
692 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
693 struct pci_dev *bridge, int busnr)
695 struct pci_bus *child;
700 * Allocate a new bus, and inherit stuff from the parent..
702 child = pci_alloc_bus(parent);
706 child->parent = parent;
707 child->ops = parent->ops;
708 child->msi = parent->msi;
709 child->sysdata = parent->sysdata;
710 child->bus_flags = parent->bus_flags;
712 /* initialize some portions of the bus device, but don't register it
713 * now as the parent is not properly set up yet.
715 child->dev.class = &pcibus_class;
716 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
719 * Set up the primary, secondary and subordinate
722 child->number = child->busn_res.start = busnr;
723 child->primary = parent->busn_res.start;
724 child->busn_res.end = 0xff;
727 child->dev.parent = parent->bridge;
731 child->self = bridge;
732 child->bridge = get_device(&bridge->dev);
733 child->dev.parent = child->bridge;
734 pci_set_bus_of_node(child);
735 pci_set_bus_speed(child);
737 /* Set up default resource pointers and names.. */
738 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
739 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
740 child->resource[i]->name = child->name;
742 bridge->subordinate = child;
745 pci_set_bus_msi_domain(child);
746 ret = device_register(&child->dev);
749 pcibios_add_bus(child);
751 /* Create legacy_io and legacy_mem files for this bus */
752 pci_create_legacy_files(child);
/* Allocate a child bus and link it onto the parent's children list
 * under pci_bus_sem.  Returns the new bus (NULL-check presumably on a
 * line missing from this excerpt — TODO confirm). */
757 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
760 struct pci_bus *child;
762 child = pci_alloc_child_bus(parent, dev, busnr);
764 down_write(&pci_bus_sem);
765 list_add_tail(&child->node, &parent->children);
766 up_write(&pci_bus_sem);
770 EXPORT_SYMBOL(pci_add_new_bus);
/* Enable CRS (Configuration Request Retry Status) Software Visibility
 * on a root port when the Root Capabilities register advertises it. */
772 static void pci_enable_crs(struct pci_dev *pdev)
776 /* Enable CRS Software Visibility if supported */
777 pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
778 if (root_cap & PCI_EXP_RTCAP_CRSVIS)
779 pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
780 PCI_EXP_RTCTL_CRSSVE);
/* Scan the bus behind a PCI-PCI or CardBus bridge.  Two-pass scheme (see
 * comment below): pass 0 trusts firmware-assigned bus numbers, pass 1
 * assigns numbers to the rest.  Returns the highest bus number used.
 * NOTE(review): many lines (variable declarations, several closing
 * braces and intermediate statements) are missing from this excerpt;
 * only the visible flow is documented. */
784 * If it's a bridge, configure it and scan the bus behind it.
785 * For CardBus bridges, we don't scan behind as the devices will
786 * be handled by the bridge driver itself.
788 * We need to process bridges in two passes -- first we scan those
789 * already configured by the BIOS and after we are done with all of
790 * them, we proceed to assigning numbers to the remaining buses in
791 * order to avoid overlaps between old and new bus numbers.
793 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
795 struct pci_bus *child;
796 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
799 u8 primary, secondary, subordinate;
802 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
803 primary = buses & 0xFF;
804 secondary = (buses >> 8) & 0xFF;
805 subordinate = (buses >> 16) & 0xFF;
807 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
808 secondary, subordinate, pass);
810 if (!primary && (primary != bus->number) && secondary && subordinate) {
811 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
812 primary = bus->number;
815 /* Check if setup is sensible at all */
817 (primary != bus->number || secondary <= bus->number ||
818 secondary > subordinate)) {
819 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
820 secondary, subordinate);
824 /* Disable MasterAbortMode during probing to avoid reporting
825 of bus errors (in some architectures) */
826 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
827 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
828 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
832 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
833 !is_cardbus && !broken) {
836 * Bus already configured by firmware, process it in the first
837 * pass and just note the configuration.
843 * The bus might already exist for two reasons: Either we are
844 * rescanning the bus or the bus is reachable through more than
845 * one bridge. The second case can happen with the i450NX
848 child = pci_find_bus(pci_domain_nr(bus), secondary);
850 child = pci_add_new_bus(bus, dev, secondary);
853 child->primary = primary;
854 pci_bus_insert_busn_res(child, secondary, subordinate);
855 child->bridge_ctl = bctl;
858 cmax = pci_scan_child_bus(child);
859 if (cmax > subordinate)
860 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
862 /* subordinate should equal child->busn_res.end */
863 if (subordinate > max)
867 * We need to assign a number to this bus which we always
868 * do in the second pass.
871 if (pcibios_assign_all_busses() || broken || is_cardbus)
872 /* Temporarily disable forwarding of the
873 configuration cycles on all bridges in
874 this bus segment to avoid possible
875 conflicts in the second pass between two
876 bridges programmed with overlapping
878 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
884 pci_write_config_word(dev, PCI_STATUS, 0xffff);
886 /* Prevent assigning a bus number that already exists.
887 * This can happen when a bridge is hot-plugged, so in
888 * this case we only re-scan this bus. */
889 child = pci_find_bus(pci_domain_nr(bus), max+1);
891 child = pci_add_new_bus(bus, dev, max+1);
894 pci_bus_insert_busn_res(child, max+1, 0xff);
897 buses = (buses & 0xff000000)
898 | ((unsigned int)(child->primary) << 0)
899 | ((unsigned int)(child->busn_res.start) << 8)
900 | ((unsigned int)(child->busn_res.end) << 16);
903 * yenta.c forces a secondary latency timer of 176.
904 * Copy that behaviour here.
907 buses &= ~0xff000000;
908 buses |= CARDBUS_LATENCY_TIMER << 24;
912 * We need to blast all three values with a single write.
914 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
917 child->bridge_ctl = bctl;
918 max = pci_scan_child_bus(child);
921 * For CardBus bridges, we leave 4 bus numbers
922 * as cards with a PCI-to-PCI bridge can be
925 for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
926 struct pci_bus *parent = bus;
927 if (pci_find_bus(pci_domain_nr(bus),
930 while (parent->parent) {
931 if ((!pcibios_assign_all_busses()) &&
932 (parent->busn_res.end > max) &&
933 (parent->busn_res.end <= max+i)) {
936 parent = parent->parent;
940 * Often, there are two cardbus bridges
941 * -- try to leave one valid bus number
951 * Set the subordinate bus number to its real value.
953 pci_bus_update_busn_res_end(child, max);
954 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
958 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
959 pci_domain_nr(bus), child->number);
961 /* Has only triggered on CardBus, fixup is in yenta_socket */
962 while (bus->parent) {
963 if ((child->busn_res.end > bus->busn_res.end) ||
964 (child->number > bus->busn_res.end) ||
965 (child->number < bus->number) ||
966 (child->busn_res.end < bus->number)) {
967 dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
969 (bus->number > child->busn_res.end &&
970 bus->busn_res.end < child->number) ?
971 "wholly" : "partially",
972 bus->self->transparent ? " transparent" : "",
980 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
984 EXPORT_SYMBOL(pci_scan_bridge);
/* Read the interrupt pin and (when a pin is wired) the interrupt line
 * from config space; the stores into dev->pin / dev->irq are presumably
 * on lines missing from this excerpt — TODO confirm. */
987 * Read interrupt line and base address registers.
988 * The architecture-dependent code can tweak these, of course.
990 static void pci_read_irq(struct pci_dev *dev)
994 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
997 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
/* Locate the PCIe capability and cache its offset, flags register and
 * max payload size capability on @pdev; then decide whether this port is
 * the upstream end of a link (has_secondary_link), per the topology
 * comment below. */
1001 void set_pcie_port_type(struct pci_dev *pdev)
1006 struct pci_dev *parent;
1008 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1011 pdev->pcie_cap = pos;
/* Fixed mis-encoded "&reg16" (appeared as "®16" — the '&reg' prefix had
 * been collapsed into the ® entity). */
1012 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1013 pdev->pcie_flags_reg = reg16;
1014 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1015 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1018 * A Root Port is always the upstream end of a Link. No PCIe
1019 * component has two Links. Two Links are connected by a Switch
1020 * that has a Port on each Link and internal logic to connect the
1023 type = pci_pcie_type(pdev);
1024 if (type == PCI_EXP_TYPE_ROOT_PORT)
1025 pdev->has_secondary_link = 1;
1026 else if (type == PCI_EXP_TYPE_UPSTREAM ||
1027 type == PCI_EXP_TYPE_DOWNSTREAM) {
1028 parent = pci_upstream_bridge(pdev);
1031 * Usually there's an upstream device (Root Port or Switch
1032 * Downstream Port), but we can't assume one exists.
1034 if (parent && !parent->has_secondary_link)
1035 pdev->has_secondary_link = 1;
/* Mark @pdev as a hotplug bridge when its PCIe Slot Capabilities
 * register advertises Hot-Plug Capable. */
1039 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
/* Fixed mis-encoded "&reg32" (was "®32"). */
1043 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1044 if (reg32 & PCI_EXP_SLTCAP_HPC)
1045 pdev->is_hotplug_bridge = 1;
/* Detect bridges that alias standard config space across the extended
 * region (see explanatory comment below): compare each 256-byte-aligned
 * dword against the vendor/device ID dword.  Compiled out (always-false
 * path presumably on a missing line) without CONFIG_PCI_QUIRKS. */
1049 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1052 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1053 * when forwarding a type1 configuration request the bridge must check that
1054 * the extended register address field is zero. The bridge is not permitted
1055 * to forward the transactions and must handle it as an Unsupported Request.
1056 * Some bridges do not follow this rule and simply drop the extended register
1057 * bits, resulting in the standard config space being aliased, every 256
1058 * bytes across the entire configuration space. Test for this condition by
1059 * comparing the first dword of each potential alias to the vendor/device ID.
1061 * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1062 * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1064 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1066 #ifdef CONFIG_PCI_QUIRKS
1070 pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1072 for (pos = PCI_CFG_SPACE_SIZE;
1073 pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1074 if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
/* Probe for extended (4096-byte) config space by reading the dword at
 * offset 0x100 (see comment below); fall back to 256 bytes when the read
 * fails, returns all-ones, or the space is an alias of standard config. */
1086 * pci_cfg_space_size - get the configuration space size of the PCI device.
1089 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1090 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1091 * access it. Maybe we don't have a way to generate extended config space
1092 * accesses, or the device is behind a reverse Express bridge. So we try
1093 * reading the dword at 0x100 which must either be 0 or a valid extended
1094 * capability header.
1096 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1099 int pos = PCI_CFG_SPACE_SIZE;
1101 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1103 if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1106 return PCI_CFG_SPACE_EXP_SIZE;
1109 return PCI_CFG_SPACE_SIZE;
/* Return the device's config space size: host bridges and PCIe devices
 * (and PCI-X v2 266/533 devices) may have extended space, probed via
 * pci_cfg_space_size_ext(); everything else gets 256 bytes. */
1112 int pci_cfg_space_size(struct pci_dev *dev)
1118 class = dev->class >> 8;
1119 if (class == PCI_CLASS_BRIDGE_HOST)
1120 return pci_cfg_space_size_ext(dev);
1122 if (!pci_is_pcie(dev)) {
1123 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1127 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1128 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1132 return pci_cfg_space_size_ext(dev);
1135 return PCI_CFG_SPACE_SIZE;
1138 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/* Cache the MSI and MSI-X capability offsets and force both disabled so
 * no stale firmware enablement causes screaming interrupts during boot
 * (see comment below); the if-guards on the cached offsets are
 * presumably on lines missing from this excerpt. */
1140 void pci_msi_setup_pci_dev(struct pci_dev *dev)
1143 * Disable the MSI hardware to avoid screaming interrupts
1144 * during boot. This is the power on reset default so
1145 * usually this should be a noop.
1147 dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1149 pci_msi_set_enable(dev, 0);
1151 dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1153 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1157 * pci_setup_device - fill in class and map information of a device
1158 * @dev: the device structure to fill
1160 * Initialize the device structure with information about the device's
1161 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1162 * Called at initialisation of the PCI subsystem and by CardBus services.
1163 * Returns 0 on success and negative if unknown type of device (not normal,
1164 * bridge or CardBus).
1166 int pci_setup_device(struct pci_dev *dev)
1171 struct pci_bus_region region;
1172 struct resource *res;
1174 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1177 dev->sysdata = dev->bus->sysdata;
1178 dev->dev.parent = dev->bus->bridge;
1179 dev->dev.bus = &pci_bus_type;
1180 dev->hdr_type = hdr_type & 0x7f;
1181 dev->multifunction = !!(hdr_type & 0x80);
1182 dev->error_state = pci_channel_io_normal;
1183 set_pcie_port_type(dev);
1185 pci_dev_assign_slot(dev);
1186 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1187 set this higher, assuming the system even supports it. */
1188 dev->dma_mask = 0xffffffff;
1190 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1191 dev->bus->number, PCI_SLOT(dev->devfn),
1192 PCI_FUNC(dev->devfn));
1194 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1195 dev->revision = class & 0xff;
1196 dev->class = class >> 8; /* upper 3 bytes */
1198 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1199 dev->vendor, dev->device, dev->hdr_type, dev->class);
1201 /* need to have dev->class ready */
1202 dev->cfg_size = pci_cfg_space_size(dev);
1204 /* "Unknown power state" */
1205 dev->current_state = PCI_UNKNOWN;
1207 pci_msi_setup_pci_dev(dev);
1209 /* Early fixups, before probing the BARs */
1210 pci_fixup_device(pci_fixup_early, dev);
1211 /* device class may be changed after fixup */
1212 class = dev->class >> 8;
1214 switch (dev->hdr_type) { /* header type */
1215 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1216 if (class == PCI_CLASS_BRIDGE_PCI)
1219 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1220 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1221 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1224 * Do the ugly legacy mode stuff here rather than broken chip
1225 * quirk code. Legacy mode ATA controllers have fixed
1226 * addresses. These are not always echoed in BAR0-3, and
1227 * BAR0-3 in a few cases contain junk!
1229 if (class == PCI_CLASS_STORAGE_IDE) {
1231 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1232 if ((progif & 1) == 0) {
1233 region.start = 0x1F0;
1235 res = &dev->resource[0];
1236 res->flags = LEGACY_IO_RESOURCE;
/* Fixed mis-encoded "&region" in the four pcibios_bus_to_resource()
 * calls below (appeared as "®ion" — '&reg' collapsed into ®). */
1237 pcibios_bus_to_resource(dev->bus, res, &region);
1238 dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1240 region.start = 0x3F6;
1242 res = &dev->resource[1];
1243 res->flags = LEGACY_IO_RESOURCE;
1244 pcibios_bus_to_resource(dev->bus, res, &region);
1245 dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1248 if ((progif & 4) == 0) {
1249 region.start = 0x170;
1251 res = &dev->resource[2];
1252 res->flags = LEGACY_IO_RESOURCE;
1253 pcibios_bus_to_resource(dev->bus, res, &region);
1254 dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1256 region.start = 0x376;
1258 res = &dev->resource[3];
1259 res->flags = LEGACY_IO_RESOURCE;
1260 pcibios_bus_to_resource(dev->bus, res, &region);
1261 dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1267 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1268 if (class != PCI_CLASS_BRIDGE_PCI)
1270 /* The PCI-to-PCI bridge spec requires that subtractive
1271 decoding (i.e. transparent) bridge must have programming
1272 interface code of 0x01. */
1274 dev->transparent = ((dev->class & 0xff) == 1);
1275 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1276 set_pcie_hotplug_bridge(dev);
1277 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1279 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1280 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1284 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1285 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1288 pci_read_bases(dev, 1, 0);
1289 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1290 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1293 default: /* unknown header */
1294 dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1299 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1300 dev->class, dev->hdr_type);
1301 dev->class = PCI_CLASS_NOT_DEFINED << 8;
1304 /* We found a fine healthy device, go go go... */
/*
 * pci_configure_mps - align a new device's PCIe Max Payload Size with its
 * upstream bridge.  If tuning is disabled (pcie_bus_config ==
 * PCIE_BUS_TUNE_OFF) a mismatch only produces a warning; otherwise, for the
 * default bus config, the device MPS is written to match the bridge.
 * NOTE(review): several lines (declarations, early returns, braces) appear
 * to be missing from this chunk — verify against the full file.
 */
1308 static void pci_configure_mps(struct pci_dev *dev)
1310 struct pci_dev *bridge = pci_upstream_bridge(dev);
/* Only meaningful for a PCIe device that sits below a PCIe bridge. */
1313 if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1316 mps = pcie_get_mps(dev);
1317 p_mps = pcie_get_mps(bridge);
/* Tuning disabled: report the mismatch but do not touch the device. */
1322 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1323 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1324 mps, pci_name(bridge), p_mps);
1329 * Fancier MPS configuration is done later by
1330 * pcie_bus_configure_settings()
1332 if (pcie_bus_config != PCIE_BUS_DEFAULT)
/* Match the device's MPS to the upstream bridge's current value. */
1335 rc = pcie_set_mps(dev, p_mps);
1337 dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1342 dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1343 p_mps, mps, 128 << dev->pcie_mpss);
/*
 * Fallback Type 0 hotplug parameters used when the platform supplies none:
 * cache line size in units of 4 bytes (8 => 32 bytes), latency timer 0x40.
 */
1346 static struct hpp_type0 pci_default_type0 = {
1348 .cache_line_size = 8,
1349 .latency_timer = 0x40,
/*
 * program_hpp_type0 - apply Type 0 (conventional PCI) hotplug parameters.
 * Falls back to pci_default_type0 when no parameters are given or the
 * revision is newer than we understand, then programs cache line size,
 * latency timer, and the SERR/PERR enables in the command register; for
 * PCI-PCI bridges the secondary latency timer and bridge-control
 * SERR/PERR bits are programmed as well.
 */
1354 static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1356 u16 pci_cmd, pci_bctl;
1359 hpp = &pci_default_type0;
/* Only revision <= 1 of the Type 0 parameter layout is understood. */
1361 if (hpp->revision > 1) {
1363 "PCI settings rev %d not supported; using defaults\n",
1365 hpp = &pci_default_type0;
1368 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1369 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1370 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1371 if (hpp->enable_serr)
1372 pci_cmd |= PCI_COMMAND_SERR;
1373 if (hpp->enable_perr)
1374 pci_cmd |= PCI_COMMAND_PARITY;
1375 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1377 /* Program bridge control value */
1378 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1379 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1380 hpp->latency_timer);
1381 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1382 if (hpp->enable_serr)
1383 pci_bctl |= PCI_BRIDGE_CTL_SERR;
1384 if (hpp->enable_perr)
1385 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1386 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
/* Type 1 (PCI-X) hotplug parameters are not implemented; warn only. */
1390 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1393 dev_warn(&dev->dev, "PCI-X settings not supported\n");
1396 static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1404 if (hpp->revision > 1) {
1405 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1411 * Don't allow _HPX to change MPS or MRRS settings. We manage
1412 * those to make sure they're consistent with the rest of the
1415 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1416 PCI_EXP_DEVCTL_READRQ;
1417 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1418 PCI_EXP_DEVCTL_READRQ);
1420 /* Initialize Device Control Register */
1421 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1422 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1424 /* Initialize Link Control Register */
1425 if (pcie_cap_has_lnkctl(dev))
1426 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1427 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1429 /* Find Advanced Error Reporting Enhanced Capability */
1430 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1434 /* Initialize Uncorrectable Error Mask Register */
1435 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, ®32);
1436 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1437 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1439 /* Initialize Uncorrectable Error Severity Register */
1440 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, ®32);
1441 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1442 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1444 /* Initialize Correctable Error Mask Register */
1445 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, ®32);
1446 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1447 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1449 /* Initialize Advanced Error Capabilities and Control Register */
1450 pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32);
1451 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1452 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1455 * FIXME: The following two registers are not supported yet.
1457 * o Secondary Uncorrectable Error Severity Register
1458 * o Secondary Uncorrectable Error Mask Register
/*
 * pci_configure_device - early configuration of a newly found device:
 * aligns its PCIe MPS with the upstream bridge, then fetches platform
 * hotplug parameters and applies Type 2, Type 1, and Type 0 settings
 * in that order.
 */
1462 static void pci_configure_device(struct pci_dev *dev)
1464 struct hotplug_params hpp;
1467 pci_configure_mps(dev);
/* Zero hpp so unset parameter groups are seen as absent by the helpers. */
1469 memset(&hpp, 0, sizeof(hpp));
1470 ret = pci_get_hp_params(dev, &hpp);
1474 program_hpp_type2(dev, hpp.t2);
1475 program_hpp_type1(dev, hpp.t1);
1476 program_hpp_type0(dev, hpp.t0);
/*
 * pci_release_capabilities - free per-device capability state (VPD, SR-IOV,
 * and the saved-capability buffers) when the device is being destroyed.
 */
1479 static void pci_release_capabilities(struct pci_dev *dev)
1481 pci_vpd_release(dev);
1482 pci_iov_release(dev);
1483 pci_free_cap_save_buffers(dev);
1487 * pci_release_dev - free a pci device structure when all users of it are finished.
1488 * @dev: device that's been disconnected
1490 * Will be called only by the device core when all users of this pci device are
1493 static void pci_release_dev(struct device *dev)
1495 struct pci_dev *pci_dev;
1497 pci_dev = to_pci_dev(dev);
/* Tear down in reverse order of setup: capabilities, OF node, arch hooks. */
1498 pci_release_capabilities(pci_dev);
1499 pci_release_of_node(pci_dev);
1500 pcibios_release_device(pci_dev);
/* Drop the bus reference taken in pci_alloc_dev(). */
1501 pci_bus_put(pci_dev->bus);
1502 kfree(pci_dev->driver_override);
/* NOTE(review): the final kfree(pci_dev) is not visible in this chunk — confirm. */
/*
 * pci_alloc_dev - allocate a zeroed struct pci_dev attached to @bus.
 * Takes a reference on @bus (released by pci_release_dev/pci_bus_put).
 * Returns the new device, or NULL on allocation failure (presumably —
 * the error path is not visible in this chunk).
 */
1506 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1508 struct pci_dev *dev;
1510 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1514 INIT_LIST_HEAD(&dev->bus_list);
1515 dev->dev.type = &pci_dev_type;
1516 dev->bus = pci_bus_get(bus);
1520 EXPORT_SYMBOL(pci_alloc_dev);
/*
 * pci_bus_read_dev_vendor_id - read the vendor/device ID dword at @devfn,
 * filtering out empty slots and waiting out Configuration Request Retry
 * Status (CRS).  Stores the raw dword in *l and returns true when a real
 * device responded.  The crs_timeout parameter bounds the CRS wait.
 */
1522 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1527 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1530 /* some broken boards return 0 or ~0 if a slot is empty: */
1531 if (*l == 0xffffffff || *l == 0x00000000 ||
1532 *l == 0x0000ffff || *l == 0xffff0000)
1536 * Configuration Request Retry Status. Some root ports return the
1537 * actual device ID instead of the synthetic ID (0xFFFF) required
1538 * by the PCIe spec. Ignore the device ID and only check for
/* Vendor ID 0x0001 is the CRS completion value: poll until it clears. */
1541 while ((*l & 0xffff) == 0x0001) {
1547 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1549 /* Card hasn't responded in 60 seconds? Must be stuck. */
1550 if (delay > crs_timeout) {
1551 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
1552 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
1560 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1563 * Read the config data for a PCI device, sanity-check it
1564 * and fill in the dev structure...
/*
 * Returns the new pci_dev on success.  A 60 s CRS timeout is used for the
 * vendor-ID read.  On pci_setup_device() failure the bus reference is
 * dropped; the NULL-return paths are not fully visible in this chunk.
 */
1566 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1568 struct pci_dev *dev;
1571 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1574 dev = pci_alloc_dev(bus);
/* Vendor in the low word, device ID in the high word of the dword read. */
1579 dev->vendor = l & 0xffff;
1580 dev->device = (l >> 16) & 0xffff;
1582 pci_set_of_node(dev);
1584 if (pci_setup_device(dev)) {
1585 pci_bus_put(dev->bus);
/*
 * pci_init_capabilities - initialize the optional capability subsystems
 * (MSI/MSI-X, save buffers, VPD, ARI, ACS, and — per the comments —
 * power management, SR-IOV, and ATS) for a newly added device.
 */
1593 static void pci_init_capabilities(struct pci_dev *dev)
1595 /* MSI/MSI-X list */
1596 pci_msi_init_pci_dev(dev);
1598 /* Buffers for saving PCIe and PCI-X capabilities */
1599 pci_allocate_cap_save_buffers(dev);
1601 /* Power Management */
1604 /* Vital Product Data */
1605 pci_vpd_pci22_init(dev);
1607 /* Alternative Routing-ID Forwarding */
1608 pci_configure_ari(dev);
1610 /* Single Root I/O Virtualization */
1613 /* Address Translation Services */
1616 /* Enable ACS P2P upstream forwarding */
1617 pci_enable_acs(dev);
/*
 * pci_set_msi_domain - give the device an MSI irq domain, inheriting the
 * parent bus's domain when pcibios_add_device() did not assign one.
 */
1620 static void pci_set_msi_domain(struct pci_dev *dev)
1623 * If no domain has been set through the pcibios_add_device
1624 * callback, inherit the default from the bus device.
1626 if (!dev_get_msi_domain(&dev->dev))
1627 dev_set_msi_domain(&dev->dev,
1628 dev_get_msi_domain(&dev->bus->dev));
/*
 * pci_device_add - register a scanned pci_dev with the driver core:
 * early config (MPS/_HPX), device-model init, DMA defaults, header
 * fixups, capability init, insertion into the bus device list, arch
 * add hook, MSI domain setup, and finally device_add() with driver
 * matching deferred (match_driver = false) until pci_bus_add_device().
 */
1631 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1635 pci_configure_device(dev);
1637 device_initialize(&dev->dev);
1638 dev->dev.release = pci_release_dev;
1640 set_dev_node(&dev->dev, pcibus_to_node(bus));
/* Conservative 32-bit DMA defaults; drivers raise them as needed. */
1641 dev->dev.dma_mask = &dev->dma_mask;
1642 dev->dev.dma_parms = &dev->dma_parms;
1643 dev->dev.coherent_dma_mask = 0xffffffffull;
1644 of_pci_dma_configure(dev);
1646 pci_set_dma_max_seg_size(dev, 65536);
1647 pci_set_dma_seg_boundary(dev, 0xffffffff);
1649 /* Fix up broken headers */
1650 pci_fixup_device(pci_fixup_header, dev);
1652 /* moved out from quirk header fixup code */
1653 pci_reassigndev_resource_alignment(dev);
1655 /* Clear the state_saved flag. */
1656 dev->state_saved = false;
1658 /* Initialize various capabilities */
1659 pci_init_capabilities(dev);
1662 * Add the device to our list of discovered devices
1663 * and the bus list for fixup functions, etc.
1665 down_write(&pci_bus_sem);
1666 list_add_tail(&dev->bus_list, &bus->devices);
1667 up_write(&pci_bus_sem);
1669 ret = pcibios_add_device(dev);
1672 /* Setup MSI irq domain */
1673 pci_set_msi_domain(dev);
1675 /* Notifier could use PCI capabilities */
1676 dev->match_driver = false;
1677 ret = device_add(&dev->dev);
/*
 * pci_scan_single_device - scan one devfn: return the already-known
 * device if pci_get_slot() finds it, otherwise probe config space and
 * register the new device via pci_device_add().
 */
1681 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
1683 struct pci_dev *dev;
1685 dev = pci_get_slot(bus, devfn);
1691 dev = pci_scan_device(bus, devfn);
1695 pci_device_add(dev, bus);
1699 EXPORT_SYMBOL(pci_scan_single_device);
/*
 * next_fn - compute the next function number to probe after @fn.
 * With ARI enabled the next function comes from the device's ARI
 * capability (0 terminates); otherwise multifunction devices are
 * probed as (fn + 1) % 8.  Returns 0 when there is no next function.
 */
1701 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1707 if (pci_ari_enabled(bus)) {
1710 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1714 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1715 next_fn = PCI_ARI_CAP_NFN(cap);
/* An ARI next-function <= fn would loop forever on a malformed list. */
1717 return 0; /* protect against malformed list */
1722 /* dev may be NULL for non-contiguous multifunction devices */
1723 if (!dev || dev->multifunction)
1724 return (fn + 1) % 8;
/*
 * only_one_child - true when @bus is a PCIe link that can only have a
 * single device (below a root port, or a downstream link, unless the
 * pci=pcie_scan_all flag forces scanning every devfn).
 */
1729 static int only_one_child(struct pci_bus *bus)
1731 struct pci_dev *parent = bus->self;
1733 if (!parent || !pci_is_pcie(parent))
1735 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1737 if (parent->has_secondary_link &&
1738 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1744 * pci_scan_slot - scan a PCI slot on a bus for devices.
1745 * @bus: PCI bus to scan
1746 * @devfn: slot number to scan (must have zero function.)
1748 * Scan a PCI slot on the specified PCI bus for devices, adding
1749 * discovered devices to the @bus->devices list. New devices
1750 * will not have is_added set.
1752 * Returns the number of new devices found.
1754 int pci_scan_slot(struct pci_bus *bus, int devfn)
1756 unsigned fn, nr = 0;
1757 struct pci_dev *dev;
/* A PCIe link has at most one device at devfn 0 — skip the rest. */
1759 if (only_one_child(bus) && (devfn > 0))
1760 return 0; /* Already scanned the entire slot */
1762 dev = pci_scan_single_device(bus, devfn);
/* Walk remaining functions; next_fn() returns 0 to terminate. */
1768 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1769 dev = pci_scan_single_device(bus, devfn + fn);
1773 dev->multifunction = 1;
1777 /* only one slot has pcie device */
1778 if (bus->self && nr)
1779 pcie_aspm_init_link_state(bus->self);
1783 EXPORT_SYMBOL(pci_scan_slot);
/*
 * pcie_find_smpss - pci_walk_bus() callback that shrinks *data (u8 *) to
 * the smallest Max Payload Size Supported found in the fabric, with the
 * hotplug-bridge exception described in the comment below.
 */
1785 static int pcie_find_smpss(struct pci_dev *dev, void *data)
1789 if (!pci_is_pcie(dev))
1793 * We don't have a way to change MPS settings on devices that have
1794 * drivers attached. A hot-added device might support only the minimum
1795 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
1796 * where devices may be hot-added, we limit the fabric MPS to 128 so
1797 * hot-added devices will work correctly.
1799 * However, if we hot-add a device to a slot directly below a Root
1800 * Port, it's impossible for there to be other existing devices below
1801 * the port. We don't limit the MPS in this case because we can
1802 * reconfigure MPS on both the Root Port and the hot-added device,
1803 * and there are no other devices involved.
1805 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
1807 if (dev->is_hotplug_bridge &&
1808 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
/* Track the minimum MPSS across the fabric. */
1811 if (*smpss > dev->pcie_mpss)
1812 *smpss = dev->pcie_mpss;
/*
 * pcie_write_mps - program the device's Max Payload Size.  In
 * PCIE_BUS_PERFORMANCE mode the device's own maximum is used, clamped to
 * the parent bridge's MPS (except at root ports); errors are only logged.
 */
1817 static void pcie_write_mps(struct pci_dev *dev, int mps)
1821 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
/* pcie_mpss is an encoded exponent: payload bytes = 128 << pcie_mpss. */
1822 mps = 128 << dev->pcie_mpss;
1824 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1826 /* For "Performance", the assumption is made that
1827 * downstream communication will never be larger than
1828 * the MRRS. So, the MPS only needs to be configured
1829 * for the upstream communication. This being the case,
1830 * walk from the top down and set the MPS of the child
1831 * to that of the parent bus.
1833 * Configure the device MPS with the smaller of the
1834 * device MPSS or the bridge MPS (which is assumed to be
1835 * properly configured at this point to the largest
1836 * allowable MPS based on its parent bus).
1838 mps = min(mps, pcie_get_mps(dev->bus->self));
1841 rc = pcie_set_mps(dev, mps);
1843 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
/*
 * pcie_write_mrrs - in PCIE_BUS_PERFORMANCE mode only, program the Max
 * Read Request Size to the device's current MPS, halving the value until
 * the hardware accepts it (the register silently rejects bad values; a
 * read-back verifies what actually stuck).
 */
1846 static void pcie_write_mrrs(struct pci_dev *dev)
1850 /* In the "safe" case, do not configure the MRRS. There appear to be
1851 * issues with setting MRRS to 0 on a number of devices.
1853 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1856 /* For Max performance, the MRRS must be set to the largest supported
1857 * value. However, it cannot be configured larger than the MPS the
1858 * device or the bus can support. This should already be properly
1859 * configured by a prior call to pcie_write_mps.
1861 mrrs = pcie_get_mps(dev);
1863 /* MRRS is a R/W register. Invalid values can be written, but a
1864 * subsequent read will verify if the value is acceptable or not.
1865 * If the MRRS value provided is not acceptable (e.g., too large),
1866 * shrink the value until it is acceptable to the HW.
1868 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1869 rc = pcie_set_readrq(dev, mrrs);
1873 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1878 dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
/*
 * pcie_bus_configure_set - pci_walk_bus() callback that applies the MPS
 * (and, in performance mode, MRRS) chosen for the fabric.  @data is a
 * u8 encoding the target MPS as an exponent (bytes = 128 << *data).
 * No-op when bus tuning is off or left at the default policy.
 */
1881 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1885 if (!pci_is_pcie(dev))
1888 if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
1889 pcie_bus_config == PCIE_BUS_DEFAULT)
1892 mps = 128 << *(u8 *)data;
1893 orig_mps = pcie_get_mps(dev);
1895 pcie_write_mps(dev, mps);
1896 pcie_write_mrrs(dev);
1898 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
1899 pcie_get_mps(dev), 128 << dev->pcie_mpss,
1900 orig_mps, pcie_get_readrq(dev));
1905 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1906 * parents then children fashion. If this changes, then this code will not
/*
 * pcie_bus_configure_settings - apply the global pcie_bus_config policy to
 * a whole bus: in SAFE mode first find the fabric-wide minimum MPSS, then
 * program every device (bridge first, then children) via
 * pcie_bus_configure_set().
 */
1909 void pcie_bus_configure_settings(struct pci_bus *bus)
1916 if (!pci_is_pcie(bus->self))
1919 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
1920 * to be aware of the MPS of the destination. To work around this,
1921 * simply force the MPS of the entire system to the smallest possible.
1923 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1926 if (pcie_bus_config == PCIE_BUS_SAFE) {
1927 smpss = bus->self->pcie_mpss;
1929 pcie_find_smpss(bus->self, &smpss);
1930 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1933 pcie_bus_configure_set(bus->self, &smpss);
1934 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1936 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
/*
 * pci_scan_child_bus - scan all 32 slots of @bus, reserve SR-IOV bus
 * numbers, run arch fixups on first scan, then recurse into bridges in
 * two passes.  Returns the highest subordinate bus number found.
 */
1938 unsigned int pci_scan_child_bus(struct pci_bus *bus)
1940 unsigned int devfn, pass, max = bus->busn_res.start;
1941 struct pci_dev *dev;
1943 dev_dbg(&bus->dev, "scanning bus\n");
1945 /* Go find them, Rover! */
/* devfn steps by 8: one probe per slot; functions handled in pci_scan_slot. */
1946 for (devfn = 0; devfn < 0x100; devfn += 8)
1947 pci_scan_slot(bus, devfn);
1949 /* Reserve buses for SR-IOV capability. */
1950 max += pci_iov_bus_range(bus);
1953 * After performing arch-dependent fixup of the bus, look behind
1954 * all PCI-to-PCI bridges on this bus.
1956 if (!bus->is_added) {
1957 dev_dbg(&bus->dev, "fixups for bus\n");
1958 pcibios_fixup_bus(bus);
/* Two passes: pass 0 handles configured bridges, pass 1 assigns the rest. */
1962 for (pass = 0; pass < 2; pass++)
1963 list_for_each_entry(dev, &bus->devices, bus_list) {
1964 if (pci_is_bridge(dev))
1965 max = pci_scan_bridge(bus, dev, max, pass);
1969 * We've scanned the bus and so we know all about what's on
1970 * the other side of any bridges that may be on this bus plus
1973 * Return how far we've got finding sub-buses.
1975 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1978 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1981 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1982 * @bridge: Host bridge to set up.
1984 * Default empty implementation. Replace with an architecture-specific setup
1985 * routine, if necessary.
/* Weak default: architectures override this to prepare the host bridge. */
1987 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
/* Weak no-op hook called when a bus is added; arch code may override. */
1992 void __weak pcibios_add_bus(struct pci_bus *bus)
/* Weak no-op hook called when a bus is removed; arch code may override. */
1996 void __weak pcibios_remove_bus(struct pci_bus *bus)
/*
 * pci_create_root_bus - allocate and register a root bus plus its host
 * bridge device: assigns the domain, rejects duplicate bus numbers,
 * registers the bridge and bus devices, creates legacy files, and moves
 * the caller-provided resource windows onto the bridge, logging each one.
 * Returns the new bus, or (presumably) NULL on failure — the error
 * returns are not fully visible in this chunk.
 */
2000 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2001 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2004 struct pci_host_bridge *bridge;
2005 struct pci_bus *b, *b2;
2006 struct resource_entry *window, *n;
2007 struct resource *res;
2008 resource_size_t offset;
2012 b = pci_alloc_bus(NULL);
2016 b->sysdata = sysdata;
2018 b->number = b->busn_res.start = bus;
2019 pci_bus_assign_domain_nr(b, parent);
2020 b2 = pci_find_bus(pci_domain_nr(b), bus);
2022 /* If we already got to this bus through a different bridge, ignore it */
2023 dev_dbg(&b2->dev, "bus already known\n");
2027 bridge = pci_alloc_host_bridge(b)
2031 bridge->dev.parent = parent;
2032 bridge->dev.release = pci_release_host_bridge_dev;
2033 dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
2034 error = pcibios_root_bridge_prepare(bridge);
2040 error = device_register(&bridge->dev);
2042 put_device(&bridge->dev);
/* The bus keeps its own reference on the bridge device. */
2045 b->bridge = get_device(&bridge->dev);
2046 device_enable_async_suspend(b->bridge);
2047 pci_set_bus_of_node(b);
2048 pci_set_bus_msi_domain(b);
2051 set_dev_node(b->bridge, pcibus_to_node(b));
2053 b->dev.class = &pcibus_class;
2054 b->dev.parent = b->bridge;
2055 dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
2056 error = device_register(&b->dev);
2058 goto class_dev_reg_err;
2062 /* Create legacy_io and legacy_mem files for this bus */
2063 pci_create_legacy_files(b);
2066 dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
2068 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
2070 /* Add initial resources to the bus */
/* Windows are moved (not copied) from the caller's list to the bridge. */
2071 resource_list_for_each_entry_safe(window, n, resources) {
2072 list_move_tail(&window->node, &bridge->windows);
2074 offset = window->offset;
2075 if (res->flags & IORESOURCE_BUS)
2076 pci_bus_insert_busn_res(b, bus, res->end);
2078 pci_bus_add_resource(b, res, 0);
2080 if (resource_type(res) == IORESOURCE_IO)
2081 fmt = " (bus address [%#06llx-%#06llx])";
2083 fmt = " (bus address [%#010llx-%#010llx])";
2084 snprintf(bus_addr, sizeof(bus_addr), fmt,
2085 (unsigned long long) (res->start - offset),
2086 (unsigned long long) (res->end - offset));
2089 dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
2092 down_write(&pci_bus_sem);
2093 list_add_tail(&b->node, &pci_root_buses);
2094 up_write(&pci_bus_sem);
2099 put_device(&bridge->dev);
2100 device_unregister(&bridge->dev);
/*
 * pci_bus_insert_busn_res - claim [bus, bus_max] as @b's bus-number
 * resource under its parent's busn_res (or the per-domain resource for a
 * root bus, which is also marked PCI_FIXED).  Returns nonzero on success
 * (conflict == NULL), 0 when the range conflicts.
 */
2107 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2109 struct resource *res = &b->busn_res;
2110 struct resource *parent_res, *conflict;
2114 res->flags = IORESOURCE_BUS;
2116 if (!pci_is_root_bus(b))
2117 parent_res = &b->parent->busn_res;
2119 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2120 res->flags |= IORESOURCE_PCI_FIXED;
2123 conflict = request_resource_conflict(parent_res, res);
2126 dev_printk(KERN_DEBUG, &b->dev,
2127 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2128 res, pci_is_root_bus(b) ? "domain " : "",
2129 parent_res, conflict->name, conflict);
2131 return conflict == NULL;
/*
 * pci_bus_update_busn_res_end - shrink/grow @b's bus-number range so it
 * ends at @bus_max (via adjust_resource), logging the outcome; if the
 * resource was never inserted, insert it now with the new bounds.
 * Returns the adjust_resource() result (presumably — tail not visible).
 */
2134 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2136 struct resource *res = &b->busn_res;
2137 struct resource old_res = *res;
2138 resource_size_t size;
2141 if (res->start > bus_max)
2144 size = bus_max - res->start + 1;
2145 ret = adjust_resource(res, res->start, size);
2146 dev_printk(KERN_DEBUG, &b->dev,
2147 "busn_res: %pR end %s updated to %02x\n",
2148 &old_res, ret ? "can not be" : "is", bus_max);
2150 if (!ret && !res->parent)
2151 pci_bus_insert_busn_res(b, res->start, res->end);
/*
 * pci_bus_release_busn_res - release @b's bus-number resource if it was
 * ever claimed (no-op when flags are clear or it has no parent).
 */
2156 void pci_bus_release_busn_res(struct pci_bus *b)
2158 struct resource *res = &b->busn_res;
2161 if (!res->flags || !res->parent)
2164 ret = release_resource(res);
2165 dev_printk(KERN_DEBUG, &b->dev,
2166 "busn_res: %pR %s released\n",
2167 res, ret ? "can not be" : "is");
/*
 * pci_scan_root_bus_msi - create a root bus (with MSI controller) and scan
 * it.  If the caller supplied no IORESOURCE_BUS window, a default
 * [bus, 0xff] range is inserted; when the range end was defaulted, it is
 * trimmed afterwards to the highest bus actually found.
 */
2170 struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
2171 struct pci_ops *ops, void *sysdata,
2172 struct list_head *resources, struct msi_controller *msi)
2174 struct resource_entry *window;
/* Look for a caller-provided bus-number window. */
2179 resource_list_for_each_entry(window, resources)
2180 if (window->res->flags & IORESOURCE_BUS) {
2185 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2193 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2195 pci_bus_insert_busn_res(b, bus, 255);
2198 max = pci_scan_child_bus(b);
2201 pci_bus_update_busn_res_end(b, max);
/* Convenience wrapper: scan a root bus with no MSI controller attached. */
2206 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2207 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2209 return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
2212 EXPORT_SYMBOL(pci_scan_root_bus);
/*
 * pci_scan_bus - legacy entry point: build a root bus using the global
 * ioport/iomem/bus-number resources and scan its children.  The temporary
 * resource list is freed if bus creation fails (presumably — the success
 * branch is not fully visible here).
 */
2214 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2217 LIST_HEAD(resources);
2220 pci_add_resource(&resources, &ioport_resource);
2221 pci_add_resource(&resources, &iomem_resource);
2222 pci_add_resource(&resources, &busn_resource);
2223 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2225 pci_scan_child_bus(b);
2227 pci_free_resource_list(&resources);
2231 EXPORT_SYMBOL(pci_scan_bus);
2234 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2235 * @bridge: PCI bridge for the bus to scan
2237 * Scan a PCI bus and child buses for new devices, add them,
2238 * and enable them, resizing bridge mmio/io resource if necessary
2239 * and possible. The caller must ensure the child devices are already
2240 * removed for resizing to occur.
2242 * Returns the max number of subordinate bus discovered.
2244 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2247 struct pci_bus *bus = bridge->subordinate;
2249 max = pci_scan_child_bus(bus);
2251 pci_assign_unassigned_bridge_resources(bridge);
2253 pci_bus_add_devices(bus);
2259 * pci_rescan_bus - scan a PCI bus for devices.
2260 * @bus: PCI bus to scan
2262 * Scan a PCI bus and child buses for new devices, adds them,
2265 * Returns the max number of subordinate bus discovered.
2267 unsigned int pci_rescan_bus(struct pci_bus *bus)
2271 max = pci_scan_child_bus(bus);
2272 pci_assign_unassigned_bus_resources(bus);
2273 pci_bus_add_devices(bus);
2277 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2280 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2281 * routines should always be executed under this mutex.
/* Serializes all hot rescan/remove operations on the PCI tree. */
2283 static DEFINE_MUTEX(pci_rescan_remove_lock);
/* Acquire the global rescan/remove mutex; pair with pci_unlock_rescan_remove(). */
2285 void pci_lock_rescan_remove(void)
2287 mutex_lock(&pci_rescan_remove_lock);
2289 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
/* Release the global rescan/remove mutex taken by pci_lock_rescan_remove(). */
2291 void pci_unlock_rescan_remove(void)
2293 mutex_unlock(&pci_rescan_remove_lock);
2295 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
/*
 * pci_sort_bf_cmp - comparator for breadth-first device ordering:
 * sorts by domain, then bus number, then devfn; returns -1/1 on
 * difference (equal keys fall through to the next criterion).
 */
2297 static int __init pci_sort_bf_cmp(const struct device *d_a,
2298 const struct device *d_b)
2300 const struct pci_dev *a = to_pci_dev(d_a);
2301 const struct pci_dev *b = to_pci_dev(d_b);
2303 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2304 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2306 if (a->bus->number < b->bus->number) return -1;
2307 else if (a->bus->number > b->bus->number) return 1;
2309 if (a->devfn < b->devfn) return -1;
2310 else if (a->devfn > b->devfn) return 1;
/* Reorder all PCI devices on the bus type into breadth-first order. */
2315 void __init pci_sort_breadthfirst(void)
2317 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);