Lines matching "perst", "regs" in drivers/pci/controller/pcie-mediatek-gen3.c (MediaTek Gen3 PCIe host controller driver)

1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/clk-provider.h>
78 #define PCIE_MSI_ENABLE GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
82 GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)
86 #define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
120 (((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
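
These fields are built with the kernel's BIT()/GENMASK() helpers. As a standalone
sketch (helpers re-implemented for illustration, assuming a 64-bit unsigned long;
PCIE_MSI_SET_NUM is 8 in this driver), here is what the ATR size encoding from
line 120 and the MSI enable mask from line 78 evaluate to:

#include <stdio.h>

#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

#define PCIE_ATR_EN		BIT(0)
#define PCIE_ATR_SIZE(size) \
	(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)

int main(void)
{
	/* "size" is log2 of the window in bytes: a 1 MiB window is 20 */
	printf("PCIE_ATR_SIZE(20) = %#lx\n", PCIE_ATR_SIZE(20));	/* 0x27 */
	printf("PCIE_MSI_ENABLE   = %#lx\n", GENMASK(8 + 8 - 1, 8));	/* 0xff00 */
	return 0;
}
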
141 SKIP_PCIE_RSTB = BIT(0), /* Skip PERST# assertion during device
148 * struct mtk_gen3_pcie_pdata - differentiate between host generations
163 * struct mtk_msi_set - MSI information for each set
175 * struct mtk_gen3_pcie - PCIe port information
195 * @soc: pointer to SoC-dependent operations
254 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
265 struct mtk_gen3_pcie *pcie = bus->sysdata; in mtk_pcie_config_tlp_header()
269 bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3); in mtk_pcie_config_tlp_header()
272 PCIE_CFG_HEADER(bus->number, devfn); in mtk_pcie_config_tlp_header()
274 writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG); in mtk_pcie_config_tlp_header()
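
The byte-enable math in mtk_pcie_config_tlp_header() turns an access of size
bytes at config offset where into a 4-bit first-dword byte-enable mask:
GENMASK(size - 1, 0) builds the contiguous enables and (where & 0x3) shifts them
to the right byte lane within the dword. A runnable sketch of just that expression:

#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

int main(void)
{
	int where = 0x06, size = 2;	/* e.g. a 16-bit read of PCI_STATUS */
	unsigned int bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);

	printf("byte enables = %#x\n", bytes);	/* 0x3 << 2 = 0xc */
	return 0;
}
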
280 struct mtk_gen3_pcie *pcie = bus->sysdata; in mtk_pcie_map_bus()
282 return pcie->base + PCIE_CFG_OFFSET_ADDR + where; in mtk_pcie_map_bus()
325 table_size = BIT(fls(remaining) - 1); in mtk_pcie_set_trans_table()
328 addr_align = BIT(ffs(cpu_addr) - 1); in mtk_pcie_set_trans_table()
334 dev_err(pcie->dev, "illegal table size %#llx\n", in mtk_pcie_set_trans_table()
336 return -EINVAL; in mtk_pcie_set_trans_table()
339 table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET; in mtk_pcie_set_trans_table()
340 writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table); in mtk_pcie_set_trans_table()
355 dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", in mtk_pcie_set_trans_table()
361 remaining -= table_size; in mtk_pcie_set_trans_table()
366 dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", in mtk_pcie_set_trans_table()
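
Each ATR entry covers a power-of-two, naturally aligned range, so
mtk_pcie_set_trans_table() splits an arbitrary window into several entries: the
next entry size is the largest power of two that fits the remaining length,
further capped by the alignment of the current CPU address. A standalone sketch
of that arithmetic (no MMIO; fls()/ffs() re-implemented), splitting a 3 MiB
window at 0x20100000:

#include <stdio.h>
#include <stdint.h>

/* 1-based bit positions, 0 when no bit is set, as in the kernel helpers */
static int fls64(uint64_t x) { int n = 0; while (x) { n++; x >>= 1; } return n; }
static int ffs64(uint64_t x) { return x ? fls64(x & -x) : 0; }

int main(void)
{
	uint64_t cpu_addr = 0x20100000, remaining = 0x300000;
	int num = 0;

	while (remaining) {
		uint64_t table_size = 1ULL << (fls64(remaining) - 1);
		uint64_t addr_align = 1ULL << (ffs64(cpu_addr) - 1);

		if (addr_align < table_size)
			table_size = addr_align;

		printf("window[%d]: cpu_addr = %#llx, size = %#llx\n", num++,
		       (unsigned long long)cpu_addr,
		       (unsigned long long)table_size);
		cpu_addr += table_size;
		remaining -= table_size;
	}
	return 0;
}

This yields a 1 MiB entry followed by a 2 MiB entry, the same shape the
dev_dbg() at line 355 would trace.
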
378 struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; in mtk_pcie_enable_msi()
380 msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG + in mtk_pcie_enable_msi()
382 msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG + in mtk_pcie_enable_msi()
386 writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base); in mtk_pcie_enable_msi()
387 writel_relaxed(upper_32_bits(msi_set->msg_addr), in mtk_pcie_enable_msi()
388 pcie->base + PCIE_MSI_SET_ADDR_HI_BASE + in mtk_pcie_enable_msi()
392 val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG); in mtk_pcie_enable_msi()
394 writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG); in mtk_pcie_enable_msi()
396 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_enable_msi()
398 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_enable_msi()
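
Each MSI set is a fixed-stride block of registers: msi_set->base is the driver's
virtual MMIO pointer and msi_set->msg_addr is the physical address an endpoint
writes to, both computed from the same per-set offset. A sketch of the layout
arithmetic; the numeric offsets and base address below are illustrative
placeholders, not datasheet values:

#include <stdio.h>
#include <stdint.h>

#define PCIE_MSI_SET_NUM	8
#define PCIE_MSI_SET_BASE_REG	0xc00	/* placeholder value */
#define PCIE_MSI_SET_OFFSET	0x10	/* placeholder value */

int main(void)
{
	uint64_t reg_base = 0x11230000;	/* example MAC physical base */

	for (int i = 0; i < PCIE_MSI_SET_NUM; i++)
		printf("set %d: msg_addr = %#llx\n", i,
		       (unsigned long long)(reg_base + PCIE_MSI_SET_BASE_REG +
					    i * PCIE_MSI_SET_OFFSET));
	return 0;
}
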
410 val = readl_relaxed(pcie->base + PCIE_SETTING_REG); in mtk_pcie_startup_port()
412 if (pcie->max_link_speed) { in mtk_pcie_startup_port()
416 if (pcie->max_link_speed >= 2) in mtk_pcie_startup_port()
418 GENMASK(pcie->max_link_speed - 2, 0)); in mtk_pcie_startup_port()
420 if (pcie->num_lanes) { in mtk_pcie_startup_port()
424 if (pcie->num_lanes > 1) in mtk_pcie_startup_port()
426 GENMASK(fls(pcie->num_lanes >> 2), 0)); in mtk_pcie_startup_port()
428 writel_relaxed(val, pcie->base + PCIE_SETTING_REG); in mtk_pcie_startup_port()
431 if (pcie->max_link_speed) { in mtk_pcie_startup_port()
432 val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS); in mtk_pcie_startup_port()
434 val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed); in mtk_pcie_startup_port()
435 writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS); in mtk_pcie_startup_port()
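
The speed cap repopulates the generation-support field with
GENMASK(max_link_speed - 2, 0): one support bit per generation from Gen2 up to
the cap, Gen1 needing no bit (hence the >= 2 guard above). Likewise a zero
link-width field means x1 and GENMASK(fls(num_lanes >> 2), 0) turns on one bit
per extra width step. A sketch of the raw mask values only; the FIELD_PREP()
placement into PCIE_SETTING_REG is omitted:

#include <stdio.h>

#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

static int fls_(unsigned int x) { int n = 0; while (x) { n++; x >>= 1; } return n; }

int main(void)
{
	for (int speed = 2; speed <= 4; speed++)
		printf("cap at gen%d: support bits = %#x\n",
		       speed, GENMASK(speed - 2, 0));

	for (int lanes = 2; lanes <= 16; lanes <<= 1)
		printf("x%-2d: link-width bits = %#x\n",
		       lanes, GENMASK(fls_(lanes >> 2), 0));
	return 0;
}
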
439 val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1); in mtk_pcie_startup_port()
442 writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1); in mtk_pcie_startup_port()
445 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_startup_port()
447 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_startup_port()
450 val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG); in mtk_pcie_startup_port()
452 writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG); in mtk_pcie_startup_port()
461 if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) { in mtk_pcie_startup_port()
463 val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_startup_port()
466 writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_startup_port()
471 * The deassertion of PERST# should be delayed 100ms (TPVPERL) in mtk_pcie_startup_port()
476 /* De-assert reset signals */ in mtk_pcie_startup_port()
479 writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_startup_port()
483 err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val, in mtk_pcie_startup_port()
490 val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG); in mtk_pcie_startup_port()
494 dev_err(pcie->dev, in mtk_pcie_startup_port()
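
The wait at line 483 uses readl_poll_timeout(), which re-reads the register with
a fixed delay between polls and returns -ETIMEDOUT when the budget expires. A
userspace analogue of that loop with a simulated status register; the
PCIE_PORT_LINKUP bit position is an assumption made for this sketch:

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#define PCIE_PORT_LINKUP	(1u << 8)	/* assumed bit position */

/* stand-in for readl_relaxed(): pretend training completes on poll 3 */
static unsigned int read_link_status(unsigned int poll)
{
	return poll >= 3 ? PCIE_PORT_LINKUP : 0;
}

static int wait_for_link(unsigned int delay_us, unsigned int timeout_us)
{
	for (unsigned int t = 0, i = 0; t <= timeout_us; t += delay_us, i++) {
		if (read_link_status(i) & PCIE_PORT_LINKUP)
			return 0;
		usleep(delay_us);
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("link wait: %d\n", wait_for_link(20, 10000));	/* 0 */
	return 0;
}
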
503 resource_list_for_each_entry(entry, &host->windows) { in mtk_pcie_startup_port()
504 struct resource *res = entry->res; in mtk_pcie_startup_port()
511 cpu_addr = pci_pio_to_address(res->start); in mtk_pcie_startup_port()
513 cpu_addr = res->start; in mtk_pcie_startup_port()
517 pci_addr = res->start - entry->offset; in mtk_pcie_startup_port()
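
entry->offset here is the window's CPU-to-bus offset, so the PCI bus address
handed to the translation table is res->start - entry->offset, with I/O
resources first converted through pci_pio_to_address(). A toy example of that
subtraction with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t res_start = 0x28000000;	/* CPU view (example) */
	uint64_t offset    = 0x08000000;	/* from the DT ranges (example) */

	printf("PCI bus address = %#llx\n",
	       (unsigned long long)(res_start - offset));	/* 0x20000000 */
	return 0;
}
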
557 struct mtk_gen3_pcie *pcie = data->domain->host_data; in mtk_compose_msi_msg()
560 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; in mtk_compose_msi_msg()
562 msg->address_hi = upper_32_bits(msi_set->msg_addr); in mtk_compose_msi_msg()
563 msg->address_lo = lower_32_bits(msi_set->msg_addr); in mtk_compose_msi_msg()
564 msg->data = hwirq; in mtk_compose_msi_msg()
565 dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n", in mtk_compose_msi_msg()
566 hwirq, msg->address_hi, msg->address_lo, msg->data); in mtk_compose_msi_msg()
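
A hardware MSI number decomposes as set = hwirq / PCIE_MSI_IRQS_PER_SET and
bit = hwirq % PCIE_MSI_IRQS_PER_SET; the remainder is also what the endpoint
writes as msg->data. A sketch of the mapping, with PCIE_MSI_IRQS_PER_SET = 32
as in this driver:

#include <stdio.h>

#define PCIE_MSI_IRQS_PER_SET	32

int main(void)
{
	for (unsigned long hwirq = 0; hwirq < 256; hwirq += 70)
		printf("hwirq %3lu -> set %lu, status/enable bit %lu\n",
		       hwirq, hwirq / PCIE_MSI_IRQS_PER_SET,
		       hwirq % PCIE_MSI_IRQS_PER_SET);
	return 0;
}
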
574 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; in mtk_msi_bottom_irq_ack()
576 writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET); in mtk_msi_bottom_irq_ack()
582 struct mtk_gen3_pcie *pcie = data->domain->host_data; in mtk_msi_bottom_irq_mask()
586 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; in mtk_msi_bottom_irq_mask()
588 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_mask()
589 val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_msi_bottom_irq_mask()
591 writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_msi_bottom_irq_mask()
592 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_mask()
598 struct mtk_gen3_pcie *pcie = data->domain->host_data; in mtk_msi_bottom_irq_unmask()
602 hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET; in mtk_msi_bottom_irq_unmask()
604 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_unmask()
605 val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_msi_bottom_irq_unmask()
607 writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_msi_bottom_irq_unmask()
608 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_msi_bottom_irq_unmask()
623 struct mtk_gen3_pcie *pcie = domain->host_data; in mtk_msi_bottom_domain_alloc()
627 mutex_lock(&pcie->lock); in mtk_msi_bottom_domain_alloc()
629 hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM, in mtk_msi_bottom_domain_alloc()
632 mutex_unlock(&pcie->lock); in mtk_msi_bottom_domain_alloc()
635 return -ENOSPC; in mtk_msi_bottom_domain_alloc()
638 msi_set = &pcie->msi_sets[set_idx]; in mtk_msi_bottom_domain_alloc()
651 struct mtk_gen3_pcie *pcie = domain->host_data; in mtk_msi_bottom_domain_free()
654 mutex_lock(&pcie->lock); in mtk_msi_bottom_domain_free()
656 bitmap_release_region(pcie->msi_irq_in_use, data->hwirq, in mtk_msi_bottom_domain_free()
659 mutex_unlock(&pcie->lock); in mtk_msi_bottom_domain_free()
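
bitmap_find_free_region() returns a naturally aligned block of 1 << order bits,
which keeps a multi-vector allocation inside one MSI set and lets the device OR
the vector index into the low bits of the message data. A toy re-implementation
of that alignment rule over a 64-bit map:

#include <stdio.h>

#define NBITS 64
static unsigned long long used;	/* toy bitmap */

static int find_free_region(int order)
{
	int size = 1 << order;

	/* only probe naturally aligned positions, as the kernel helper does */
	for (int pos = 0; pos + size <= NBITS; pos += size) {
		unsigned long long mask =
			((size < 64 ? (1ULL << size) : 0) - 1) << pos;

		if (!(used & mask)) {
			used |= mask;
			return pos;
		}
	}
	return -1;
}

int main(void)
{
	printf("order 0 -> hwirq %d\n", find_free_region(0));	/* 0 */
	printf("order 2 -> hwirq %d\n", find_free_region(2));	/* 4 */
	printf("order 1 -> hwirq %d\n", find_free_region(1));	/* 2 */
	return 0;
}
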
675 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_intx_mask()
676 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_mask()
677 val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT); in mtk_intx_mask()
678 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_mask()
679 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_intx_mask()
688 raw_spin_lock_irqsave(&pcie->irq_lock, flags); in mtk_intx_unmask()
689 val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_unmask()
690 val |= BIT(data->hwirq + PCIE_INTX_SHIFT); in mtk_intx_unmask()
691 writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG); in mtk_intx_unmask()
692 raw_spin_unlock_irqrestore(&pcie->irq_lock, flags); in mtk_intx_unmask()
696 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
700 * until the corresponding de-assert message is received; hence that
708 hwirq = data->hwirq + PCIE_INTX_SHIFT; in mtk_intx_eoi()
709 writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG); in mtk_intx_eoi()
722 irq_set_chip_data(irq, domain->host_data); in mtk_pcie_intx_map()
734 struct device *dev = pcie->dev; in mtk_pcie_init_irq_domains()
735 struct device_node *intc_node, *node = dev->of_node; in mtk_pcie_init_irq_domains()
738 raw_spin_lock_init(&pcie->irq_lock); in mtk_pcie_init_irq_domains()
741 intc_node = of_get_child_by_name(node, "interrupt-controller"); in mtk_pcie_init_irq_domains()
743 dev_err(dev, "missing interrupt-controller node\n"); in mtk_pcie_init_irq_domains()
744 return -ENODEV; in mtk_pcie_init_irq_domains()
747 pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX, in mtk_pcie_init_irq_domains()
749 if (!pcie->intx_domain) { in mtk_pcie_init_irq_domains()
751 ret = -ENODEV; in mtk_pcie_init_irq_domains()
756 mutex_init(&pcie->lock); in mtk_pcie_init_irq_domains()
758 pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM, in mtk_pcie_init_irq_domains()
760 if (!pcie->msi_bottom_domain) { in mtk_pcie_init_irq_domains()
762 ret = -ENODEV; in mtk_pcie_init_irq_domains()
766 pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode, in mtk_pcie_init_irq_domains()
768 pcie->msi_bottom_domain); in mtk_pcie_init_irq_domains()
769 if (!pcie->msi_domain) { in mtk_pcie_init_irq_domains()
771 ret = -ENODEV; in mtk_pcie_init_irq_domains()
779 irq_domain_remove(pcie->msi_bottom_domain); in mtk_pcie_init_irq_domains()
781 irq_domain_remove(pcie->intx_domain); in mtk_pcie_init_irq_domains()
789 irq_set_chained_handler_and_data(pcie->irq, NULL, NULL); in mtk_pcie_irq_teardown()
791 if (pcie->intx_domain) in mtk_pcie_irq_teardown()
792 irq_domain_remove(pcie->intx_domain); in mtk_pcie_irq_teardown()
794 if (pcie->msi_domain) in mtk_pcie_irq_teardown()
795 irq_domain_remove(pcie->msi_domain); in mtk_pcie_irq_teardown()
797 if (pcie->msi_bottom_domain) in mtk_pcie_irq_teardown()
798 irq_domain_remove(pcie->msi_bottom_domain); in mtk_pcie_irq_teardown()
800 irq_dispose_mapping(pcie->irq); in mtk_pcie_irq_teardown()
805 struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx]; in mtk_pcie_msi_handler()
809 msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_pcie_msi_handler()
812 msi_status = readl_relaxed(msi_set->base + in mtk_pcie_msi_handler()
820 generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq); in mtk_pcie_msi_handler()
834 status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG); in mtk_pcie_irq_handler()
837 generic_handle_domain_irq(pcie->intx_domain, in mtk_pcie_irq_handler()
838 irq_bit - PCIE_INTX_SHIFT); in mtk_pcie_irq_handler()
843 mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT); in mtk_pcie_irq_handler()
845 writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG); in mtk_pcie_irq_handler()
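
The top-level handler demultiplexes a single status register: bits at
PCIE_INTX_SHIFT feed the INTx domain and bits at PCIE_MSI_SHIFT select an MSI
set to scan. A sketch of that dispatch; PCIE_MSI_SHIFT = 8 follows from the
PCIE_MSI_ENABLE mask at line 78, while PCIE_INTX_SHIFT = 24 is assumed here for
illustration:

#include <stdio.h>

#define PCIE_INTX_SHIFT		24	/* assumed for this sketch */
#define PCIE_MSI_SHIFT		8
#define PCI_NUM_INTX		4
#define PCIE_MSI_SET_NUM	8

int main(void)
{
	unsigned int status = (1u << (PCIE_INTX_SHIFT + 1)) |
			      (1u << (PCIE_MSI_SHIFT + 3));

	for (int bit = PCIE_INTX_SHIFT; bit < PCIE_INTX_SHIFT + PCI_NUM_INTX; bit++)
		if (status & (1u << bit))
			printf("INTx %d fired\n", bit - PCIE_INTX_SHIFT);

	for (int bit = PCIE_MSI_SHIFT; bit < PCIE_MSI_SHIFT + PCIE_MSI_SET_NUM; bit++)
		if (status & (1u << bit))
			printf("scan MSI set %d\n", bit - PCIE_MSI_SHIFT);
	return 0;
}
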
853 struct device *dev = pcie->dev; in mtk_pcie_setup_irq()
861 pcie->irq = platform_get_irq(pdev, 0); in mtk_pcie_setup_irq()
862 if (pcie->irq < 0) in mtk_pcie_setup_irq()
863 return pcie->irq; in mtk_pcie_setup_irq()
865 irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie); in mtk_pcie_setup_irq()
872 int i, ret, num_resets = pcie->soc->phy_resets.num_resets; in mtk_pcie_parse_port()
873 struct device *dev = pcie->dev; in mtk_pcie_parse_port()
875 struct resource *regs; in mtk_pcie_parse_port()
878 regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac"); in mtk_pcie_parse_port()
879 if (!regs) in mtk_pcie_parse_port()
880 return -EINVAL; in mtk_pcie_parse_port()
881 pcie->base = devm_ioremap_resource(dev, regs); in mtk_pcie_parse_port()
882 if (IS_ERR(pcie->base)) { in mtk_pcie_parse_port()
884 return PTR_ERR(pcie->base); in mtk_pcie_parse_port()
887 pcie->reg_base = regs->start; in mtk_pcie_parse_port()
890 pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i]; in mtk_pcie_parse_port()
892 ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets); in mtk_pcie_parse_port()
898 pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac"); in mtk_pcie_parse_port()
899 if (IS_ERR(pcie->mac_reset)) { in mtk_pcie_parse_port()
900 ret = PTR_ERR(pcie->mac_reset); in mtk_pcie_parse_port()
901 if (ret != -EPROBE_DEFER) in mtk_pcie_parse_port()
907 pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); in mtk_pcie_parse_port()
908 if (IS_ERR(pcie->phy)) { in mtk_pcie_parse_port()
909 ret = PTR_ERR(pcie->phy); in mtk_pcie_parse_port()
910 if (ret != -EPROBE_DEFER) in mtk_pcie_parse_port()
916 pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks); in mtk_pcie_parse_port()
917 if (pcie->num_clks < 0) { in mtk_pcie_parse_port()
919 return pcie->num_clks; in mtk_pcie_parse_port()
922 ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes); in mtk_pcie_parse_port()
925 dev_warn(dev, "invalid num-lanes, using controller defaults\n"); in mtk_pcie_parse_port()
927 pcie->num_lanes = num_lanes; in mtk_pcie_parse_port()
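
The num-lanes validation itself falls outside this match view; PCIe lane counts
are powers of two up to x16, so a representative check (hypothetical, not
necessarily the driver's exact condition) looks like:

#include <stdio.h>
#include <stdbool.h>

static bool valid_num_lanes(unsigned int n)
{
	return n >= 1 && n <= 16 && (n & (n - 1)) == 0;
}

int main(void)
{
	for (unsigned int n = 0; n <= 16; n++)
		if (valid_num_lanes(n))
			printf("x%u ok\n", n);
	return 0;
}
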
936 struct device *dev = pcie->dev; in mtk_pcie_en7581_power_up()
947 reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, in mtk_pcie_en7581_power_up()
948 pcie->phy_resets); in mtk_pcie_en7581_power_up()
949 reset_control_assert(pcie->mac_reset); in mtk_pcie_en7581_power_up()
958 pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node, in mtk_pcie_en7581_power_up()
959 "mediatek,pbus-csr", in mtk_pcie_en7581_power_up()
965 entry = resource_list_first_type(&host->windows, IORESOURCE_MEM); in mtk_pcie_en7581_power_up()
967 return -ENODEV; in mtk_pcie_en7581_power_up()
969 addr = entry->res->start - entry->offset; in mtk_pcie_en7581_power_up()
971 size = lower_32_bits(resource_size(entry->res)); in mtk_pcie_en7581_power_up()
976 * requires PHY initialization and power-on before PHY reset deassert. in mtk_pcie_en7581_power_up()
978 err = phy_init(pcie->phy); in mtk_pcie_en7581_power_up()
984 err = phy_power_on(pcie->phy); in mtk_pcie_en7581_power_up()
990 err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_en7581_power_up()
997 * Wait for the time needed to complete the bulk de-assert above. in mtk_pcie_en7581_power_up()
1009 writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG); in mtk_pcie_en7581_power_up()
1015 writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG); in mtk_pcie_en7581_power_up()
1017 err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); in mtk_pcie_en7581_power_up()
1035 reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_en7581_power_up()
1037 phy_power_off(pcie->phy); in mtk_pcie_en7581_power_up()
1039 phy_exit(pcie->phy); in mtk_pcie_en7581_power_up()
1046 struct device *dev = pcie->dev; in mtk_pcie_power_up()
1053 reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, in mtk_pcie_power_up()
1054 pcie->phy_resets); in mtk_pcie_power_up()
1055 reset_control_assert(pcie->mac_reset); in mtk_pcie_power_up()
1059 err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_power_up()
1065 err = phy_init(pcie->phy); in mtk_pcie_power_up()
1071 err = phy_power_on(pcie->phy); in mtk_pcie_power_up()
1078 reset_control_deassert(pcie->mac_reset); in mtk_pcie_power_up()
1083 err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); in mtk_pcie_power_up()
1094 reset_control_assert(pcie->mac_reset); in mtk_pcie_power_up()
1095 phy_power_off(pcie->phy); in mtk_pcie_power_up()
1097 phy_exit(pcie->phy); in mtk_pcie_power_up()
1099 reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_power_up()
1106 clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks); in mtk_pcie_power_down()
1108 pm_runtime_put_sync(pcie->dev); in mtk_pcie_power_down()
1109 pm_runtime_disable(pcie->dev); in mtk_pcie_power_down()
1110 reset_control_assert(pcie->mac_reset); in mtk_pcie_power_down()
1112 phy_power_off(pcie->phy); in mtk_pcie_power_down()
1113 phy_exit(pcie->phy); in mtk_pcie_power_down()
1114 reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_power_down()
1122 val = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG); in mtk_pcie_get_controller_max_link_speed()
1126 return ret > 0 ? ret : -EINVAL; in mtk_pcie_get_controller_max_link_speed()
1141 reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets); in mtk_pcie_setup()
1144 err = pcie->soc->power_up(pcie); in mtk_pcie_setup()
1148 err = of_pci_get_max_link_speed(pcie->dev->of_node); in mtk_pcie_setup()
1155 pcie->max_link_speed = err; in mtk_pcie_setup()
1156 dev_info(pcie->dev, in mtk_pcie_setup()
1158 max_speed, pcie->max_link_speed); in mtk_pcie_setup()
1181 struct device *dev = &pdev->dev; in mtk_pcie_probe()
1188 return -ENOMEM; in mtk_pcie_probe()
1192 pcie->dev = dev; in mtk_pcie_probe()
1193 pcie->soc = device_get_match_data(dev); in mtk_pcie_probe()
1200 host->ops = &mtk_pcie_ops; in mtk_pcie_probe()
1201 host->sysdata = pcie; in mtk_pcie_probe()
1219 pci_stop_root_bus(host->bus); in mtk_pcie_remove()
1220 pci_remove_root_bus(host->bus); in mtk_pcie_remove()
1231 raw_spin_lock(&pcie->irq_lock); in mtk_pcie_irq_save()
1233 pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_irq_save()
1236 struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; in mtk_pcie_irq_save()
1238 msi_set->saved_irq_state = readl_relaxed(msi_set->base + in mtk_pcie_irq_save()
1242 raw_spin_unlock(&pcie->irq_lock); in mtk_pcie_irq_save()
1249 raw_spin_lock(&pcie->irq_lock); in mtk_pcie_irq_restore()
1251 writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG); in mtk_pcie_irq_restore()
1254 struct mtk_msi_set *msi_set = &pcie->msi_sets[i]; in mtk_pcie_irq_restore()
1256 writel_relaxed(msi_set->saved_irq_state, in mtk_pcie_irq_restore()
1257 msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET); in mtk_pcie_irq_restore()
1260 raw_spin_unlock(&pcie->irq_lock); in mtk_pcie_irq_restore()
1267 val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG); in mtk_pcie_turn_off_link()
1269 writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG); in mtk_pcie_turn_off_link()
1272 return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val, in mtk_pcie_turn_off_link()
1287 dev_err(pcie->dev, "cannot enter L2 state\n"); in mtk_pcie_suspend_noirq()
1291 if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) { in mtk_pcie_suspend_noirq()
1292 /* Assert the PERST# pin */ in mtk_pcie_suspend_noirq()
1293 val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_suspend_noirq()
1295 writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG); in mtk_pcie_suspend_noirq()
1298 dev_dbg(pcie->dev, "entered L2 states successfully"); in mtk_pcie_suspend_noirq()
1311 err = pcie->soc->power_up(pcie); in mtk_pcie_resume_noirq()
1342 .id[0] = "phy-lane0",
1343 .id[1] = "phy-lane1",
1344 .id[2] = "phy-lane2",
1351 { .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
1352 { .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
1361 .name = "mtk-pcie-gen3",