Lines Matching +full:- +full:cfg
1 // SPDX-License-Identifier: GPL-2.0
3 * Low-level direct PCI config space access via ECAM - common code between
4 * i386 and x86-64.
7 * - known chipset handling
8 * - ACPI decoding and validation
10 * Per-architecture code takes care of the mappings and accesses
37 static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg) in pci_mmconfig_remove() argument
39 if (cfg->res.parent) in pci_mmconfig_remove()
40 release_resource(&cfg->res); in pci_mmconfig_remove()
41 list_del(&cfg->list); in pci_mmconfig_remove()
42 kfree(cfg); in pci_mmconfig_remove()
47 struct pci_mmcfg_region *cfg, *tmp; in free_all_mmcfg() local
50 list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list) in free_all_mmcfg()
51 pci_mmconfig_remove(cfg); in free_all_mmcfg()
56 struct pci_mmcfg_region *cfg; in list_add_sorted() local
59 list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list, pci_mmcfg_lock_held()) { in list_add_sorted()
60 if (cfg->segment > new->segment || in list_add_sorted()
61 (cfg->segment == new->segment && in list_add_sorted()
62 cfg->start_bus >= new->start_bus)) { in list_add_sorted()
63 list_add_tail_rcu(&new->list, &cfg->list); in list_add_sorted()
67 list_add_tail_rcu(&new->list, &pci_mmcfg_list); in list_add_sorted()
83 new->address = addr; in pci_mmconfig_alloc()
84 new->segment = segment; in pci_mmconfig_alloc()
85 new->start_bus = start; in pci_mmconfig_alloc()
86 new->end_bus = end; in pci_mmconfig_alloc()
88 res = &new->res; in pci_mmconfig_alloc()
89 res->start = addr + PCI_MMCFG_BUS_OFFSET(start); in pci_mmconfig_alloc()
90 res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1; in pci_mmconfig_alloc()
91 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; in pci_mmconfig_alloc()
92 snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN, in pci_mmconfig_alloc()
93 "PCI ECAM %04x [bus %02x-%02x]", segment, start, end); in pci_mmconfig_alloc()
94 res->name = new->name; in pci_mmconfig_alloc()
112 pr_info("ECAM %pR (base %#lx) for domain %04x [bus %02x-%02x]\n", in pci_mmconfig_add()
113 &new->res, (unsigned long)addr, segment, start, end); in pci_mmconfig_add()
120 struct pci_mmcfg_region *cfg; in pci_mmconfig_lookup() local
122 list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list, pci_mmcfg_lock_held()) in pci_mmconfig_lookup()
123 if (cfg->segment == segment && in pci_mmconfig_lookup()
124 cfg->start_bus <= bus && bus <= cfg->end_bus) in pci_mmconfig_lookup()
125 return cfg; in pci_mmconfig_lookup()
133 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win); in pci_mmcfg_e7520()
149 raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar); in pci_mmcfg_intel_945()
183 if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL) in pci_mmcfg_intel_945()
224 segnbits = busnbits - 8; in pci_mmcfg_amd_fam10h()
228 end_bus = (1 << busnbits) - 1; in pci_mmcfg_amd_fam10h()
273 raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), 0, 4, &l); in pci_mmcfg_nvidia_mcp55()
280 raw_pci_ops->read(0, bus, PCI_DEVFN(0, 0), extcfg_regnum, in pci_mmcfg_nvidia_mcp55()
291 end = start + extcfg_sizebus[size_index] - 1; in pci_mmcfg_nvidia_mcp55()
326 struct pci_mmcfg_region *cfg, *cfgx; in pci_mmcfg_check_end_bus_number() local
329 list_for_each_entry(cfg, &pci_mmcfg_list, list) { in pci_mmcfg_check_end_bus_number()
330 if (cfg->end_bus < cfg->start_bus) in pci_mmcfg_check_end_bus_number()
331 cfg->end_bus = 255; in pci_mmcfg_check_end_bus_number()
334 if (cfg->list.next == &pci_mmcfg_list) in pci_mmcfg_check_end_bus_number()
337 cfgx = list_entry(cfg->list.next, typeof(*cfg), list); in pci_mmcfg_check_end_bus_number()
338 if (cfg->end_bus >= cfgx->start_bus) in pci_mmcfg_check_end_bus_number()
339 cfg->end_bus = cfgx->start_bus - 1; in pci_mmcfg_check_end_bus_number()
359 raw_pci_ops->read(0, bus, devfn, 0, 4, &l); in pci_mmcfg_check_hostbridge()
384 if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) { in check_mcfg_resource()
386 &res->data.fixed_memory32; in check_mcfg_resource()
389 if ((mcfg_res->start >= fixmem32->address) && in check_mcfg_resource()
390 (mcfg_res->end < (fixmem32->address + in check_mcfg_resource()
391 fixmem32->address_length))) { in check_mcfg_resource()
392 mcfg_res->flags = 1; in check_mcfg_resource()
396 if ((res->type != ACPI_RESOURCE_TYPE_ADDRESS32) && in check_mcfg_resource()
397 (res->type != ACPI_RESOURCE_TYPE_ADDRESS64)) in check_mcfg_resource()
406 if ((mcfg_res->start >= address.address.minimum) && in check_mcfg_resource()
407 (mcfg_res->end < (address.address.minimum + address.address.address_length))) { in check_mcfg_resource()
408 mcfg_res->flags = 1; in check_mcfg_resource()
422 if (mcfg_res->flags) in find_mboard_resource()
433 mcfg_res.end = end - 1; in is_acpi_reserved()
448 u64 start = res->start; in is_efi_mmio()
449 u64 end = res->start + resource_size(res); in is_efi_mmio()
454 if (md->type == EFI_MEMORY_MAPPED_IO) { in is_efi_mmio()
455 size = md->num_pages << EFI_PAGE_SHIFT; in is_efi_mmio()
456 mmio_start = md->phys_addr; in is_efi_mmio()
471 struct pci_mmcfg_region *cfg, in is_mmconf_reserved() argument
474 u64 addr = cfg->res.start; in is_mmconf_reserved()
475 u64 size = resource_size(&cfg->res); in is_mmconf_reserved()
490 &cfg->res, method); in is_mmconf_reserved()
492 pr_info("ECAM %pR reserved as %s\n", &cfg->res, method); in is_mmconf_reserved()
496 cfg->end_bus = cfg->start_bus + ((size>>20) - 1); in is_mmconf_reserved()
497 num_buses = cfg->end_bus - cfg->start_bus + 1; in is_mmconf_reserved()
498 cfg->res.end = cfg->res.start + in is_mmconf_reserved()
499 PCI_MMCFG_BUS_OFFSET(num_buses) - 1; in is_mmconf_reserved()
500 snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN, in is_mmconf_reserved()
501 "PCI ECAM %04x [bus %02x-%02x]", in is_mmconf_reserved()
502 cfg->segment, cfg->start_bus, cfg->end_bus); in is_mmconf_reserved()
506 &cfg->res, (unsigned long) cfg->address); in is_mmconf_reserved()
508 pr_info("ECAM %pR (base %#lx) for %04x [bus%02x-%02x] (size reduced!)\n", in is_mmconf_reserved()
509 &cfg->res, (unsigned long) cfg->address, in is_mmconf_reserved()
510 cfg->segment, cfg->start_bus, cfg->end_bus); in is_mmconf_reserved()
517 struct pci_mmcfg_region *cfg, int early) in pci_mmcfg_reserved() argument
542 return is_mmconf_reserved(e820__mapped_all, cfg, dev, in pci_mmcfg_reserved()
549 if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, in pci_mmcfg_reserved()
555 &cfg->res); in pci_mmcfg_reserved()
558 &cfg->res); in pci_mmcfg_reserved()
560 if (is_efi_mmio(&cfg->res)) { in pci_mmcfg_reserved()
562 &cfg->res); in pci_mmcfg_reserved()
564 &cfg->res); in pci_mmcfg_reserved()
567 &cfg->res, conflict->name, conflict); in pci_mmcfg_reserved()
570 &cfg->res); in pci_mmcfg_reserved()
586 struct pci_mmcfg_region *cfg; in pci_mmcfg_reject_broken() local
588 list_for_each_entry(cfg, &pci_mmcfg_list, list) { in pci_mmcfg_reject_broken()
589 if (!pci_mmcfg_reserved(NULL, cfg, early)) { in pci_mmcfg_reject_broken()
591 &cfg->res); in pci_mmcfg_reject_broken()
599 struct acpi_mcfg_allocation *cfg) in acpi_mcfg_valid_entry() argument
601 if (cfg->address < 0xFFFFFFFF) in acpi_mcfg_valid_entry()
604 if (!strncmp(mcfg->header.oem_id, "SGI", 3)) in acpi_mcfg_valid_entry()
607 if ((mcfg->header.revision >= 1) && (dmi_get_bios_year() >= 2010)) in acpi_mcfg_valid_entry()
610 pr_err("ECAM at %#llx for %04x [bus %02x-%02x] is above 4GB, ignored\n", in acpi_mcfg_valid_entry()
611 cfg->address, cfg->pci_segment, cfg->start_bus_number, in acpi_mcfg_valid_entry()
612 cfg->end_bus_number); in acpi_mcfg_valid_entry()
619 struct acpi_mcfg_allocation *cfg_table, *cfg; in pci_parse_mcfg() local
624 return -EINVAL; in pci_parse_mcfg()
631 i = header->length - sizeof(struct acpi_table_mcfg); in pci_parse_mcfg()
634 i -= sizeof(struct acpi_mcfg_allocation); in pci_parse_mcfg()
638 return -ENODEV; in pci_parse_mcfg()
643 cfg = &cfg_table[i]; in pci_parse_mcfg()
644 if (!acpi_mcfg_valid_entry(mcfg, cfg)) { in pci_parse_mcfg()
646 return -ENODEV; in pci_parse_mcfg()
649 if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number, in pci_parse_mcfg()
650 cfg->end_bus_number, cfg->address) == NULL) { in pci_parse_mcfg()
653 return -ENOMEM; in pci_parse_mcfg()
667 struct pci_mmcfg_region *cfg; in pci_mmcfg_for_each_region() local
673 list_for_each_entry(cfg, &pci_mmcfg_list, list) { in pci_mmcfg_for_each_region()
674 rc = func(cfg->res.start, resource_size(&cfg->res), data); in pci_mmcfg_for_each_region()
695 const struct pci_mmcfg_region *cfg; in __pci_mmcfg_init() local
697 list_for_each_entry(cfg, &pci_mmcfg_list, list) { in __pci_mmcfg_init()
698 if (cfg->segment) in __pci_mmcfg_init()
700 pcibios_last_bus = cfg->end_bus; in __pci_mmcfg_init()
749 struct pci_mmcfg_region *cfg; in pci_mmcfg_late_insert_resources() local
764 list_for_each_entry(cfg, &pci_mmcfg_list, list) { in pci_mmcfg_late_insert_resources()
765 if (!cfg->res.parent) { in pci_mmcfg_late_insert_resources()
766 pr_debug("%s() insert %pR\n", __func__, &cfg->res); in pci_mmcfg_late_insert_resources()
767 insert_resource(&iomem_resource, &cfg->res); in pci_mmcfg_late_insert_resources()
787 struct pci_mmcfg_region *cfg; in pci_mmconfig_insert() local
789 dev_dbg(dev, "%s(%04x [bus %02x-%02x])\n", __func__, seg, start, end); in pci_mmconfig_insert()
792 return -ENODEV; in pci_mmconfig_insert()
795 return -EINVAL; in pci_mmconfig_insert()
798 cfg = pci_mmconfig_lookup(seg, start); in pci_mmconfig_insert()
799 if (cfg) { in pci_mmconfig_insert()
800 if (cfg->end_bus < end) in pci_mmconfig_insert()
801 dev_info(dev, FW_INFO "ECAM %pR for domain %04x [bus %02x-%02x] only partially covers this bridge\n", in pci_mmconfig_insert()
802 &cfg->res, cfg->segment, cfg->start_bus, in pci_mmconfig_insert()
803 cfg->end_bus); in pci_mmconfig_insert()
805 return -EEXIST; in pci_mmconfig_insert()
809 * Don't move earlier; we must return -EEXIST, not -EINVAL, if in pci_mmconfig_insert()
814 return -EINVAL; in pci_mmconfig_insert()
817 rc = -EBUSY; in pci_mmconfig_insert()
818 cfg = pci_mmconfig_alloc(seg, start, end, addr); in pci_mmconfig_insert()
819 if (cfg == NULL) { in pci_mmconfig_insert()
821 rc = -ENOMEM; in pci_mmconfig_insert()
822 } else if (!pci_mmcfg_reserved(dev, cfg, 0)) { in pci_mmconfig_insert()
824 &cfg->res); in pci_mmconfig_insert()
829 &cfg->res); in pci_mmconfig_insert()
833 &cfg->res, tmp->name, tmp); in pci_mmconfig_insert()
834 } else if (pci_mmcfg_arch_map(cfg)) { in pci_mmconfig_insert()
835 dev_warn(dev, "fail to map ECAM %pR\n", &cfg->res); in pci_mmconfig_insert()
837 list_add_sorted(cfg); in pci_mmconfig_insert()
839 &cfg->res, (unsigned long)addr); in pci_mmconfig_insert()
840 cfg = NULL; in pci_mmconfig_insert()
845 if (cfg) { in pci_mmconfig_insert()
846 if (cfg->res.parent) in pci_mmconfig_insert()
847 release_resource(&cfg->res); in pci_mmconfig_insert()
848 kfree(cfg); in pci_mmconfig_insert()
859 struct pci_mmcfg_region *cfg; in pci_mmconfig_delete() local
862 list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) in pci_mmconfig_delete()
863 if (cfg->segment == seg && cfg->start_bus == start && in pci_mmconfig_delete()
864 cfg->end_bus == end) { in pci_mmconfig_delete()
865 list_del_rcu(&cfg->list); in pci_mmconfig_delete()
867 pci_mmcfg_arch_unmap(cfg); in pci_mmconfig_delete()
868 if (cfg->res.parent) in pci_mmconfig_delete()
869 release_resource(&cfg->res); in pci_mmconfig_delete()
871 kfree(cfg); in pci_mmconfig_delete()
876 return -ENOENT; in pci_mmconfig_delete()
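
Note: the resource span computed in pci_mmconfig_alloc() (lines 89-90 above) and the per-register accesses follow the standard ECAM layout, in which each bus occupies 1 MiB of config space (32 devices x 8 functions x 4 KiB each). The standalone C sketch below is not part of the file above; it only illustrates that arithmetic, assuming PCI_MMCFG_BUS_OFFSET(bus) expands to (bus) << 20 as in the x86 headers, with an arbitrarily chosen example base address.

/*
 * Standalone illustration (userspace, not kernel code) of the ECAM
 * address arithmetic used by pci_mmconfig_alloc() and the config
 * accessors: each bus gets 1 MiB, each devfn 4 KiB within it.
 */
#include <stdint.h>
#include <stdio.h>

/* Assumed to match PCI_MMCFG_BUS_OFFSET(bus) == (bus) << 20. */
#define ECAM_BUS_OFFSET(bus)	((uint64_t)(bus) << 20)

/* MMIO address of one config register: base + bus<<20 + devfn<<12 + reg. */
static uint64_t ecam_reg_addr(uint64_t base, unsigned int bus,
			      unsigned int devfn, unsigned int reg)
{
	return base + ECAM_BUS_OFFSET(bus) + ((uint64_t)devfn << 12) + reg;
}

int main(void)
{
	uint64_t base = 0xe0000000ULL;		/* arbitrary example base */
	unsigned int start = 0x00, end = 0xff;	/* bus range of the region */

	/* Resource span, as computed in pci_mmconfig_alloc(). */
	uint64_t res_start = base + ECAM_BUS_OFFSET(start);
	uint64_t res_end   = base + ECAM_BUS_OFFSET(end + 1) - 1;

	printf("ECAM [bus %02x-%02x] %#llx-%#llx\n", start, end,
	       (unsigned long long)res_start, (unsigned long long)res_end);

	/* Config dword at offset 0 of bus 0, device 0, function 0. */
	printf("00:00.0 reg 0x00 -> %#llx\n",
	       (unsigned long long)ecam_reg_addr(base, 0, 0, 0));
	return 0;
}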