Lines matching +full:no +full:-l1ss, from drivers/pci/controller/dwc/pcie-designware-host.c (excerpts; "..." marks elided lines)

// SPDX-License-Identifier: GPL-2.0
...
#include "pcie-designware.h"

In dw_pcie_msi_irq_chip (the top-level MSI irq_chip):

    .name = "PCI-MSI",
    ...
In dw_handle_msi_irq():

    num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
    ...
            generic_handle_domain_irq(pp->irq_domain,
                          (i * MAX_MSI_IRQS_PER_CTRL) + pos);
In dw_pci_setup_msi_msg():

    msi_target = (u64)pp->msi_data;

    msg->address_lo = lower_32_bits(msi_target);
    msg->address_hi = upper_32_bits(msi_target);

    msg->data = d->hwirq;

    dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
        (int)d->hwirq, msg->address_hi, msg->address_lo);
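Every vector shares this one doorbell address; only the data payload distinguishes them. A standalone worked example (with a made-up target address) of the hi/lo split that lower_32_bits()/upper_32_bits() perform:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical doorbell address; pp->msi_data in the driver. */
        uint64_t msi_target = 0x000000012345a000ULL;

        /* Mirrors lower_32_bits()/upper_32_bits() semantics. */
        uint32_t lo = (uint32_t)(msi_target & 0xffffffffu);
        uint32_t hi = (uint32_t)(msi_target >> 32);

        printf("address_hi %#x address_lo %#x\n", hi, lo);  /* 0x1 / 0x2345a000 */
        return 0;
    }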
In dw_pci_bottom_mask():

    raw_spin_lock_irqsave(&pp->lock, flags);

    ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

    pp->irq_mask[ctrl] |= BIT(bit);
    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

    raw_spin_unlock_irqrestore(&pp->lock, flags);
In dw_pci_bottom_unmask():

    raw_spin_lock_irqsave(&pp->lock, flags);

    ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

    pp->irq_mask[ctrl] &= ~BIT(bit);
    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);

    raw_spin_unlock_irqrestore(&pp->lock, flags);
In dw_pci_bottom_ack():

    ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, BIT(bit));
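The same hwirq arithmetic recurs in mask, unmask and ack. A standalone example of the mapping, assuming the usual 32-vectors-per-controller layout and a 12-byte per-controller register stride (the MSI_REG_CTRL_BLOCK_SIZE value is an assumption here):

    #include <stdio.h>

    #define MAX_MSI_IRQS_PER_CTRL   32
    #define MSI_REG_CTRL_BLOCK_SIZE 12  /* assumed: 3 x 32-bit regs per controller */

    int main(void)
    {
        unsigned int hwirq = 37;                            /* hypothetical vector */
        unsigned int ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;  /* -> 1 */
        unsigned int bit  = hwirq % MAX_MSI_IRQS_PER_CTRL;  /* -> 5 */
        unsigned int res  = ctrl * MSI_REG_CTRL_BLOCK_SIZE; /* -> 12, reg block offset */

        printf("hwirq %u -> ctrl %u, bit %u, reg offset +%u\n", hwirq, ctrl, bit, res);
        return 0;
    }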
In dw_pci_msi_bottom_irq_chip:

    .name = "DWPCI-MSI",
    ...
In dw_pcie_irq_domain_alloc():

    struct dw_pcie_rp *pp = domain->host_data;
    ...
    raw_spin_lock_irqsave(&pp->lock, flags);

    bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
                      order_base_2(nr_irqs));

    raw_spin_unlock_irqrestore(&pp->lock, flags);

    if (bit < 0)
        return -ENOSPC;

    for (i = 0; i < nr_irqs; i++)
        irq_domain_set_info(domain, virq + i, bit + i,
                    pp->msi_irq_chip,
                    pp, handle_edge_irq,
                    NULL, NULL);
In dw_pcie_irq_domain_free():

    struct dw_pcie_rp *pp = domain->host_data;
    ...
    raw_spin_lock_irqsave(&pp->lock, flags);

    bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
                  order_base_2(nr_irqs));

    raw_spin_unlock_irqrestore(&pp->lock, flags);
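bitmap_find_free_region() hands out naturally aligned power-of-two blocks, which is what multi-MSI requires, so the order argument is ceil(log2(nr_irqs)) as computed by order_base_2() in the allocation above. A small standalone demonstration of that rounding:

    #include <stdio.h>

    /* Same rounding as the kernel's order_base_2(): smallest order
     * such that (1 << order) >= n. */
    static unsigned int order_base_2_demo(unsigned int n)
    {
        unsigned int order = 0;

        while ((1u << order) < n)
            order++;
        return order;
    }

    int main(void)
    {
        printf("nr_irqs 1 -> order %u\n", order_base_2_demo(1)); /* 0 */
        printf("nr_irqs 4 -> order %u\n", order_base_2_demo(4)); /* 2 */
        printf("nr_irqs 5 -> order %u\n", order_base_2_demo(5)); /* 3 */
        return 0;
    }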
In dw_pcie_allocate_domains():

    struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

    pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
                          &dw_pcie_msi_domain_ops, pp);
    if (!pp->irq_domain) {
        dev_err(pci->dev, "Failed to create IRQ domain\n");
        return -ENOMEM;
    }

    irq_domain_update_bus_token(pp->irq_domain, DOMAIN_BUS_NEXUS);

    pp->msi_domain = pci_msi_create_irq_domain(fwnode,
                           &dw_pcie_msi_domain_info,
                           pp->irq_domain);
    if (!pp->msi_domain) {
        dev_err(pci->dev, "Failed to create MSI domain\n");
        irq_domain_remove(pp->irq_domain);
        return -ENOMEM;
    }
In dw_pcie_free_msi():

    for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
        if (pp->msi_irq[ctrl] > 0)
            irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
                             NULL, NULL);
    }

    irq_domain_remove(pp->msi_domain);
    irq_domain_remove(pp->irq_domain);
In dw_pcie_msi_init():

    u64 msi_target = (u64)pp->msi_data;

    if (!pci_msi_enabled() || !pp->has_msi_ctrl)
        return;
    ...
In dw_pcie_parse_split_msi_irq():

    struct device *dev = pci->dev;
    ...
        irq = platform_get_irq_byname_optional(pdev, msi_name);
        if (irq == -ENXIO)
            break;
        ...
        pp->msi_irq[ctrl] = irq;
    }

    /* If no "msiX" IRQs, caller should fallback to "msi" IRQ */
    if (ctrl == 0)
        return -ENXIO;

    max_vectors = ctrl * MAX_MSI_IRQS_PER_CTRL;
    if (pp->num_vectors > max_vectors) {
        dev_warn(dev, "Exceeding number of MSI vectors, limiting to %u\n",
             max_vectors);
        pp->num_vectors = max_vectors;
    }

    if (!pp->num_vectors)
        pp->num_vectors = max_vectors;
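The elided loop head forms the per-controller IRQ names. A sketch of it, assuming the conventional "msi0".."msiN" interrupt-names; platform_get_irq_byname_optional() returns -ENXIO for an absent name, which ends the scan without treating it as an error:

    char msi_name[] = "msiX";
    u32 ctrl;
    int irq;

    for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
        msi_name[3] = '0' + ctrl;   /* "msi0", "msi1", ... (single digit) */
        irq = platform_get_irq_byname_optional(pdev, msi_name);
        if (irq == -ENXIO)
            break;                  /* name absent: end of the list */
        if (irq < 0)
            return irq;             /* genuine lookup error */
        pp->msi_irq[ctrl] = irq;
    }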
In dw_pcie_msi_host_init():

    struct device *dev = pci->dev;
    ...
    for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++)
        pp->irq_mask[ctrl] = ~0;

    if (!pp->msi_irq[0]) {
        ret = dw_pcie_parse_split_msi_irq(pp);
        if (ret < 0 && ret != -ENXIO)
            return ret;
    }

    if (!pp->num_vectors)
        pp->num_vectors = MSI_DEF_NUM_VECTORS;
    num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

    if (!pp->msi_irq[0]) {
        pp->msi_irq[0] = platform_get_irq_byname_optional(pdev, "msi");
        if (pp->msi_irq[0] < 0) {
            pp->msi_irq[0] = platform_get_irq(pdev, 0);
            if (pp->msi_irq[0] < 0)
                return pp->msi_irq[0];
        }
    }

    dev_dbg(dev, "Using %d MSI vectors\n", pp->num_vectors);

    pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
    ...
    for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
        if (pp->msi_irq[ctrl] > 0)
            irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
                             dw_chained_msi_isr, pp);
    }

    /*
     * Even though the iMSI-RX Module supports 64-bit addresses some
     * peripheral PCIe devices may lack 64-bit message support. In
     * order not to miss MSI TLPs from those devices the MSI target
     * address has to be within the lowest 4GB. Until there is a
     * better alternative, the reservation is done by allocating from
     * the artificially limited DMA-coherent memory.
     */
    ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
    if (!ret)
        msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
                        GFP_KERNEL);

    if (!msi_vaddr) {
        dev_warn(dev, "Failed to allocate 32-bit MSI address\n");
        dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
        msi_vaddr = dmam_alloc_coherent(dev, sizeof(u64), &pp->msi_data,
                        GFP_KERNEL);
        if (!msi_vaddr)
            return -ENOMEM;
    }
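Why the doorbell has to live below 4 GiB: an endpoint that only implements 32-bit MSI cannot address a higher target, so its write would land at the truncated address and the vector would be lost. A standalone illustration with a hypothetical above-4 GiB target:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t doorbell = 0x100000000ULL;         /* hypothetical >4G target */
        uint32_t as_seen_by_32bit_ep = (uint32_t)doorbell; /* upper bits dropped */

        printf("intended %#llx, 32-bit endpoint writes to %#x\n",
               (unsigned long long)doorbell, as_seen_by_32bit_ep);
        return 0;
    }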
In dw_pcie_host_request_msg_tlp_res():

    win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM);
    if (win) {
        res = devm_kzalloc(pci->dev, sizeof(*res), GFP_KERNEL);
        if (!res)
            return;

        /*
         * Allocate MSG TLP region of size 'region_align' at the end
         * of the host bridge window.
         */
        res->start = win->res->end - pci->region_align + 1;
        res->end = win->res->end;
        res->name = "msg";
        res->flags = win->res->flags | IORESOURCE_BUSY;

        if (!devm_request_resource(pci->dev, win->res, res))
            pp->msg_res = res;
    }
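A worked example of the carve-out arithmetic with hypothetical window and alignment values: the last region_align bytes of the first MEM window become the "msg" slot.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical bridge MEM window and iATU alignment. */
        uint64_t win_start = 0x40000000ULL, win_end = 0x4fffffffULL;
        uint64_t region_align = 0x10000ULL; /* 64 KiB */

        uint64_t msg_start = win_end - region_align + 1;

        printf("msg slot   [%#llx..%#llx]\n",
               (unsigned long long)msg_start, (unsigned long long)win_end);
        printf("MEM usable [%#llx..%#llx]\n",
               (unsigned long long)win_start, (unsigned long long)(msg_start - 1));
        return 0;
    }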
In dw_pcie_host_init():

    struct device *dev = pci->dev;
    struct device_node *np = dev->of_node;
    ...
    raw_spin_lock_init(&pp->lock);
    ...
    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
    if (!res) {
        dev_err(dev, "Missing \"config\" reg space\n");
        return -ENODEV;
    }

    pp->cfg0_size = resource_size(res);
    pp->cfg0_base = res->start;

    pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
    if (IS_ERR(pp->va_cfg0_base))
        return PTR_ERR(pp->va_cfg0_base);

    bridge = devm_pci_alloc_host_bridge(dev, 0);
    if (!bridge)
        return -ENOMEM;

    pp->bridge = bridge;

    /* Get the I/O range from DT */
    win = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
    if (win) {
        pp->io_size = resource_size(win->res);
        pp->io_bus_addr = win->res->start - win->offset;
        pp->io_base = pci_pio_to_address(win->res->start);
    }

    /* Set default bus ops */
    bridge->ops = &dw_pcie_ops;
    bridge->child_ops = &dw_child_pcie_ops;

    if (pp->ops->init) {
        ret = pp->ops->init(pp);
        if (ret)
            return ret;
    }

    if (pci_msi_enabled()) {
        pp->has_msi_ctrl = !(pp->ops->msi_init ||
                     of_property_present(np, "msi-parent") ||
                     of_property_present(np, "msi-map"));

        if (!pp->has_msi_ctrl && !pp->num_vectors) {
            pp->num_vectors = MSI_DEF_NUM_VECTORS;
        } else if (pp->num_vectors > MAX_MSI_IRQS) {
            dev_err(dev, "Invalid number of vectors\n");
            ret = -EINVAL;
            goto err_deinit_host;
        }

        if (pp->ops->msi_init) {
            ret = pp->ops->msi_init(pp);
            if (ret < 0)
                goto err_deinit_host;
        } else if (pp->has_msi_ctrl) {
            ret = dw_pcie_msi_host_init(pp);
            if (ret < 0)
                goto err_deinit_host;
        }
    }
    ...
    if (pp->use_atu_msg)
        dw_pcie_host_request_msg_tlp_res(pp);
    ...
    /*
     * Note: Skip the link up delay only when a Link Up IRQ is present.
     * If there is no Link Up IRQ, we should not bypass the delay
     * because that would require users to manually rescan for devices.
     */
    if (!pp->use_linkup_irq)
        /* Ignore errors, the link may come up later */
        dw_pcie_wait_for_link(pci);

    bridge->sysdata = pp;
    ...
    if (pp->ops->post_init)
        pp->ops->post_init(pp);

    return 0;
    ...
err_free_msi:
    if (pp->has_msi_ctrl)
        dw_pcie_free_msi(pp);

err_deinit_host:
    if (pp->ops->deinit)
        pp->ops->deinit(pp);

    return ret;
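For context, this is roughly how a platform (glue) driver consumes dw_pcie_host_init(). A minimal sketch: all my_* names are invented, and a real driver would also set up clocks, resets and PHYs before handing control over; the DBI and "config" resources come from the devicetree.

    #include <linux/platform_device.h>

    #include "pcie-designware.h"

    static int my_pcie_host_init(struct dw_pcie_rp *pp)
    {
        /* Platform clock/PHY/regulator bring-up would go here. */
        return 0;
    }

    static const struct dw_pcie_host_ops my_pcie_host_ops = {
        .init = my_pcie_host_init,
    };

    static int my_pcie_probe(struct platform_device *pdev)
    {
        struct dw_pcie *pci;

        pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
        if (!pci)
            return -ENOMEM;

        pci->dev = &pdev->dev;
        pci->pp.ops = &my_pcie_host_ops;

        /* dw_pcie_host_init() maps resources, sets up MSI and probes the bus. */
        return dw_pcie_host_init(&pci->pp);
    }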
In dw_pcie_host_deinit():

    pci_stop_root_bus(pp->bridge->bus);
    pci_remove_root_bus(pp->bridge->bus);
    ...
    if (pp->has_msi_ctrl)
        dw_pcie_free_msi(pp);

    if (pp->ops->deinit)
        pp->ops->deinit(pp);
In dw_pcie_other_conf_map_bus():

    struct dw_pcie_rp *pp = bus->sysdata;
    ...
    busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
         PCIE_ATU_FUNC(PCI_FUNC(devfn));

    if (pci_is_root_bus(bus->parent))
        atu.type = PCIE_ATU_TYPE_CFG0;
    else
        atu.type = PCIE_ATU_TYPE_CFG1;

    atu.cpu_addr = pp->cfg0_base;
    atu.pci_addr = busdev;
    atu.size = pp->cfg0_size;
    ...
    return pp->va_cfg0_base + where;
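A standalone sketch of the routing-ID packing, assuming the usual DWC field layout (bus in bits 31:24, device in 23:19, function in 18:16):

    #include <stdint.h>
    #include <stdio.h>

    /* Config-TLP routing ID packed into the iATU target address. */
    static uint32_t busdev(uint32_t bus, uint32_t dev, uint32_t fn)
    {
        return (bus << 24) | (dev << 19) | (fn << 16);
    }

    int main(void)
    {
        printf("01:00.0 -> %#x\n", busdev(1, 0, 0)); /* 0x1000000 */
        printf("02:03.1 -> %#x\n", busdev(2, 3, 1)); /* 0x2190000 */
        return 0;
    }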
In dw_pcie_rd_other_conf():

    struct dw_pcie_rp *pp = bus->sysdata;
    ...
    if (pp->cfg0_io_shared) {
        atu.type = PCIE_ATU_TYPE_IO;
        atu.cpu_addr = pp->io_base;
        atu.pci_addr = pp->io_bus_addr;
        atu.size = pp->io_size;
        ...
    }
In dw_pcie_wr_other_conf():

    struct dw_pcie_rp *pp = bus->sysdata;
    ...
    if (pp->cfg0_io_shared) {
        atu.type = PCIE_ATU_TYPE_IO;
        atu.cpu_addr = pp->io_base;
        atu.pci_addr = pp->io_bus_addr;
        atu.size = pp->io_size;
        ...
    }
In dw_pcie_own_conf_map_bus():

    struct dw_pcie_rp *pp = bus->sysdata;
    ...
    if (PCI_SLOT(devfn) > 0)
        return NULL;

    return pci->dbi_base + where;
In dw_pcie_iatu_setup():

    /* Note the very first outbound ATU is used for CFG IOs */
    if (!pci->num_ob_windows) {
        dev_err(pci->dev, "No outbound iATU found\n");
        return -EINVAL;
    }

    /*
     * Ensure all out/inbound windows are disabled before proceeding with
     * the MEM/IO (dma-)ranges setups.
     */
    for (i = 0; i < pci->num_ob_windows; i++)
        dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_OB, i);

    for (i = 0; i < pci->num_ib_windows; i++)
        dw_pcie_disable_atu(pci, PCIE_ATU_REGION_DIR_IB, i);

    i = 0;
    resource_list_for_each_entry(entry, &pp->bridge->windows) {
        if (resource_type(entry->res) != IORESOURCE_MEM)
            continue;

        if (pci->num_ob_windows <= ++i)
            break;

        atu.index = i;
        atu.type = PCIE_ATU_TYPE_MEM;
        atu.cpu_addr = entry->res->start;
        atu.pci_addr = entry->res->start - entry->offset;

        /* Adjust iATU size if MSG TLP region was allocated before */
        if (pp->msg_res && pp->msg_res->parent == entry->res)
            atu.size = resource_size(entry->res) -
                   resource_size(pp->msg_res);
        else
            atu.size = resource_size(entry->res);

        ret = dw_pcie_prog_outbound_atu(pci, &atu);
        if (ret) {
            dev_err(pci->dev, "Failed to set MEM range %pr\n",
                entry->res);
            return ret;
        }
    }

    if (pp->io_size) {
        if (pci->num_ob_windows > ++i) {
            atu.index = i;
            atu.type = PCIE_ATU_TYPE_IO;
            atu.cpu_addr = pp->io_base;
            atu.pci_addr = pp->io_bus_addr;
            atu.size = pp->io_size;

            ret = dw_pcie_prog_outbound_atu(pci, &atu);
            if (ret) {
                dev_err(pci->dev, "Failed to set IO range %pr\n",
                    entry->res);
                return ret;
            }
        } else {
            pp->cfg0_io_shared = true;
        }
    }

    if (pci->num_ob_windows <= i)
        dev_warn(pci->dev, "Ranges exceed outbound iATU size (%d)\n",
             pci->num_ob_windows);

    pp->msg_atu_index = i;

    i = 0;
    resource_list_for_each_entry(entry, &pp->bridge->dma_ranges) {
        if (resource_type(entry->res) != IORESOURCE_MEM)
            continue;

        if (pci->num_ib_windows <= i)
            break;

        ret = dw_pcie_prog_inbound_atu(pci, i++, PCIE_ATU_TYPE_MEM,
                           entry->res->start,
                           entry->res->start - entry->offset,
                           resource_size(entry->res));
        if (ret) {
            dev_err(pci->dev, "Failed to set DMA range %pr\n",
                entry->res);
            return ret;
        }
    }

    if (pci->num_ib_windows <= i)
        dev_warn(pci->dev, "Dma-ranges exceed inbound iATU size (%u)\n",
             pci->num_ib_windows);
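The pci_addr arithmetic in both loops follows the host-bridge window convention: entry->offset is the CPU-to-bus delta of the range. A standalone worked example with hypothetical addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical window: CPU 0x40000000 maps to PCI bus address 0x0. */
        uint64_t res_start = 0x40000000ULL;
        uint64_t offset    = 0x40000000ULL; /* entry->offset */

        /* As above: pci_addr = res->start - offset. */
        printf("cpu_addr %#llx -> pci_addr %#llx\n",
               (unsigned long long)res_start,
               (unsigned long long)(res_start - offset));
        return 0;
    }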
In dw_pcie_setup_rc():

    /*
     * Enable DBI read-only registers for writing/updating configuration.
     * Write permission gets disabled towards the end of this function.
     */
    dw_pcie_dbi_ro_wr_en(pci);
    ...
    if (pp->has_msi_ctrl) {
        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

        /* Initialize IRQ Status array */
        for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
                       (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
                       pp->irq_mask[ctrl]);
            ...
        }
    }
    ...
    if (pp->bridge->child_ops == &dw_child_pcie_ops) {
        ...
    }
In dw_pcie_pme_turn_off():

    if (pci->num_ob_windows <= pci->pp.msg_atu_index)
        return -ENOSPC;

    if (!pci->pp.msg_res)
        return -ENOSPC;

    atu.code = PCIE_MSG_CODE_PME_TURN_OFF;
    atu.routing = PCIE_MSG_TYPE_R_BC;
    atu.type = PCIE_ATU_TYPE_MSG;
    atu.size = resource_size(pci->pp.msg_res);
    atu.index = pci->pp.msg_atu_index;

    atu.cpu_addr = pci->pp.msg_res->start;
    ...
    mem = ioremap(atu.cpu_addr, pci->region_align);
    if (!mem)
        return -ENOMEM;
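The elided tail presumably completes the send. A sketch, assuming the standard writel()/iounmap() accessors: once the window is programmed as a Msg-type region, any store into it leaves the link as the configured message.

    /* Sketch of the assumed continuation: the payload of the dummy
     * write is irrelevant; the iATU converts the access into the
     * PME_Turn_Off Msg TLP configured above. */
    writel(0, mem);
    iounmap(mem);

    return 0;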
In dw_pcie_suspend_noirq():

    u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
    ...
    /*
     * If L1SS is supported, then do not put the link into L2 as some
     * devices such as NVMe expect low resume latency.
     */
    if (dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKCTL) & PCI_EXP_LNKCTL_ASPM_L1)
        return 0;
    ...
    if (pci->pp.ops->pme_turn_off) {
        pci->pp.ops->pme_turn_off(&pci->pp);
    } else {
        ret = dw_pcie_pme_turn_off(pci);
        if (ret)
            return ret;
    }
    ...
    if (ret) {
        dev_err(pci->dev, "Timeout waiting for L2 entry! LTSSM: 0x%x\n", val);
        return ret;
    }
    ...
    /*
     * ...
     * main power. This is harmless when no endpoint is connected.
     */
    ...
    if (pci->pp.ops->deinit)
        pci->pp.ops->deinit(&pci->pp);

    pci->suspended = true;
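The poll that precedes the timeout message above is elided. A sketch of the pattern, assuming the read_poll_timeout() helper from <linux/iopoll.h> together with the driver's dw_pcie_get_ltssm() accessor and the DW_PCIE_LTSSM_L2_IDLE / PCIE_PME_TO_L2_TIMEOUT_US constants:

    /* Wait for the LTSSM to settle in L2 after PME_Turn_Off was sent. */
    ret = read_poll_timeout(dw_pcie_get_ltssm, val,
                val == DW_PCIE_LTSSM_L2_IDLE,
                PCIE_PME_TO_L2_TIMEOUT_US / 10,
                PCIE_PME_TO_L2_TIMEOUT_US, false, pci);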
In dw_pcie_resume_noirq():

    if (!pci->suspended)
        return 0;

    pci->suspended = false;

    if (pci->pp.ops->init) {
        ret = pci->pp.ops->init(&pci->pp);
        if (ret) {
            dev_err(pci->dev, "Host init failed: %d\n", ret);
            return ret;
        }
    }

    dw_pcie_setup_rc(&pci->pp);
    ...
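These suspend/resume helpers are meant to be wired into a glue driver's noirq PM ops. A sketch with invented my_* names, assuming probe() stored the dw_pcie pointer with platform_set_drvdata():

    #include <linux/pm.h>

    static int my_pcie_suspend_noirq(struct device *dev)
    {
        struct dw_pcie *pci = dev_get_drvdata(dev);

        return dw_pcie_suspend_noirq(pci);
    }

    static int my_pcie_resume_noirq(struct device *dev)
    {
        struct dw_pcie *pci = dev_get_drvdata(dev);

        return dw_pcie_resume_noirq(pci);
    }

    static const struct dev_pm_ops my_pcie_pm_ops = {
        NOIRQ_SYSTEM_SLEEP_PM_OPS(my_pcie_suspend_noirq,
                                  my_pcie_resume_noirq)
    };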