Lines Matching +full:pcie +full:- +full:0

// SPDX-License-Identifier: GPL-2.0
 * MediaTek PCIe host controller driver.
#include <linux/clk-provider.h>
#define PCIE_BASE_CFG_REG 0x14
#define PCIE_SETTING_REG 0x80
#define PCIE_PCI_IDS_1 0x9c
#define PCIE_RC_MODE BIT(0)
#define PCIE_EQ_PRESET_01_REG 0x100
#define PCIE_VAL_LN0_DOWNSTREAM GENMASK(6, 0)
#define PCIE_CFGNUM_REG 0x140
#define PCIE_CFG_DEVFN(devfn) ((devfn) & GENMASK(7, 0))
#define PCIE_CFG_OFFSET_ADDR 0x1000
#define PCIE_RST_CTRL_REG 0x148
#define PCIE_MAC_RSTB BIT(0)
#define PCIE_LTSSM_STATUS_REG 0x150
#define PCIE_LTSSM_STATE_L2_IDLE 0x14
#define PCIE_LINK_STATUS_REG 0x154
#define PCIE_INT_ENABLE_REG 0x180
#define PCIE_MSI_ENABLE GENMASK(PCIE_MSI_SET_NUM + 8 - 1, 8)
GENMASK(PCIE_INTX_SHIFT + PCI_NUM_INTX - 1, PCIE_INTX_SHIFT)
#define PCIE_INT_STATUS_REG 0x184
#define PCIE_MSI_SET_ENABLE_REG 0x190
#define PCIE_MSI_SET_ENABLE GENMASK(PCIE_MSI_SET_NUM - 1, 0)
#define PCIE_PIPE4_PIE8_REG 0x338
#define PCIE_K_FINETUNE_MAX GENMASK(5, 0)
#define PCIE_MSI_SET_BASE_REG 0xc00
#define PCIE_MSI_SET_OFFSET 0x10
#define PCIE_MSI_SET_STATUS_OFFSET 0x04
#define PCIE_MSI_SET_ENABLE_OFFSET 0x08
#define PCIE_MSI_SET_ADDR_HI_BASE 0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04
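/*
 * Note: per the defines above and their use in mtk_pcie_enable_msi(), each
 * MSI set occupies a PCIE_MSI_SET_OFFSET (0x10) stride starting at 0xc00:
 * the low 32 bits of the doorbell address sit at the set base, the pending
 * status word at +0x04 and the per-vector enable mask at +0x08, while the
 * upper 32 address bits live in a separate array starting at 0xc80.
 */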
#define PCIE_ICMD_PM_REG 0x198
#define PCIE_MISC_CTRL_REG 0x348
#define PCIE_TRANS_TABLE_BASE_REG 0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8
#define PCIE_ATR_TRSL_ADDR_MSB_OFFSET 0xc
#define PCIE_ATR_TRSL_PARAM_OFFSET 0x10
#define PCIE_ATR_TLB_SET_OFFSET 0x20
#define PCIE_ATR_EN BIT(0)
(((((size) - 1) << 1) & GENMASK(6, 1)) | PCIE_ATR_EN)
#define PCIE_ATR_ID(id) ((id) & GENMASK(3, 0))
#define PCIE_ATR_TYPE_MEM PCIE_ATR_ID(0)
#define PCIE_ATR_TLP_TYPE_MEM PCIE_ATR_TLP_TYPE(0)
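/*
 * The size expression above is evidently the body of PCIE_ATR_SIZE(size): it
 * stores (size - 1) in bits 6:1 together with the enable bit, where "size"
 * is the log2 of the window length. mtk_pcie_set_trans_table() passes
 * fls(table_size) - 1, e.g. 26 for a 64 MiB window, giving a field value of
 * 25.
 */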
/* Time in ms needed to complete PCIe reset on EN7581 SoC */
#define PCIE_CONF_LINK2_CTL_STS (PCIE_CFG_OFFSET_ADDR + 0xb0)
#define PCIE_CONF_LINK2_LCR2_LINK_SPEED GENMASK(3, 0)
SKIP_PCIE_RSTB = BIT(0), /* Skip PERST# assertion during device
 * struct mtk_gen3_pcie_pdata - differentiate between host generations
 * @power_up: pcie power_up callback
 * @flags: pcie device flags.
int (*power_up)(struct mtk_gen3_pcie *pcie);
 * struct mtk_msi_set - MSI information for each set
 * struct mtk_gen3_pcie - PCIe port information
 * @dev: pointer to PCIe device
 * @clks: PCIe clocks
 * @num_clks: PCIe clocks count for this port
 * @max_link_speed: Maximum link speed (PCIe Gen) for this port
 * @num_lanes: Number of PCIe lanes for this port
 * @irq: PCIe controller interrupt number
 * @soc: pointer to SoC-dependent operations
"detect.quiet", /* 0x00 */
"detect.active", /* 0x01 */
"polling.active", /* 0x02 */
"polling.compliance", /* 0x03 */
"polling.configuration", /* 0x04 */
"config.linkwidthstart", /* 0x05 */
"config.linkwidthaccept", /* 0x06 */
"config.lanenumwait", /* 0x07 */
"config.lanenumaccept", /* 0x08 */
"config.complete", /* 0x09 */
"config.idle", /* 0x0A */
"recovery.receiverlock", /* 0x0B */
"recovery.equalization", /* 0x0C */
"recovery.speed", /* 0x0D */
"recovery.receiverconfig", /* 0x0E */
"recovery.idle", /* 0x0F */
"L0", /* 0x10 */
"L0s", /* 0x11 */
"L1.entry", /* 0x12 */
"L1.idle", /* 0x13 */
"L2.idle", /* 0x14 */
"L2.transmitwake", /* 0x15 */
"disable", /* 0x16 */
"loopback.entry", /* 0x17 */
"loopback.active", /* 0x18 */
"loopback.exit", /* 0x19 */
"hotreset", /* 0x1A */
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
struct mtk_gen3_pcie *pcie = bus->sysdata;
bytes = (GENMASK(size - 1, 0) & 0xf) << (where & 0x3);
PCIE_CFG_HEADER(bus->number, devfn);
writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
struct mtk_gen3_pcie *pcie = bus->sysdata;
return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
val <<= (where & 0x3) * 8;
static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
table_size = BIT(fls(remaining) - 1);
if (cpu_addr > 0) {
addr_align = BIT(ffs(cpu_addr) - 1);
if (table_size < 0x1000) {
dev_err(pcie->dev, "illegal table size %#llx\n",
return -EINVAL;
table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET;
writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table);
dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
remaining -= table_size;
dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
return 0;
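/*
 * Standalone sketch (not part of the driver) of the window-splitting rule
 * used by mtk_pcie_set_trans_table(): each ATR entry covers the largest
 * power-of-two chunk allowed by both the remaining length and the current
 * CPU address alignment; the driver additionally rejects chunks smaller
 * than 0x1000. The addresses below are illustrative only.
 */
#include <stdio.h>

static unsigned long long chunk(unsigned long long cpu_addr,
				unsigned long long remaining)
{
	/* BIT(fls(remaining) - 1): highest power of two <= remaining */
	unsigned long long size = 1ULL << (63 - __builtin_clzll(remaining));
	/* BIT(ffs(cpu_addr) - 1): alignment of the current CPU address */
	unsigned long long align = cpu_addr & -cpu_addr;

	if (cpu_addr && align < size)
		size = align;
	return size;
}

int main(void)
{
	unsigned long long addr = 0x20000000ULL, left = 0x1800000ULL;

	/* Prints two windows: 0x1000000 @ 0x20000000, then 0x800000 @ 0x21000000 */
	while (left) {
		unsigned long long sz = chunk(addr, left);

		printf("window: %#llx bytes at %#llx\n", sz, addr);
		addr += sz;
		left -= sz;
	}
	return 0;
}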
static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
writel_relaxed(upper_32_bits(msi_set->msg_addr),
pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);
val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
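/*
 * msg_addr is built from reg_base, the physical start of the "pcie-mac"
 * register resource saved in mtk_pcie_parse_port(), rather than from the
 * ioremapped base: the address an endpoint targets with an MSI write is the
 * physical address of the MSI set itself, and the written payload (the
 * per-set hwirq, see mtk_compose_msi_msg()) selects the bit that becomes
 * pending in that set's status register.
 */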
static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
unsigned int table_index = 0;
/* Set as RC mode and set controller PCIe Gen speed restriction, if any */
val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
if (pcie->max_link_speed) {
if (pcie->max_link_speed >= 2)
GENMASK(pcie->max_link_speed - 2, 0));
if (pcie->num_lanes) {
if (pcie->num_lanes > 1)
GENMASK(fls(pcie->num_lanes >> 2), 0));
writel_relaxed(val, pcie->base + PCIE_SETTING_REG);
if (pcie->max_link_speed) {
val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS);
val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed);
writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS);
val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);
val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
 * causing occasional PCIe link down. In order to overcome the issue,
 * PCIe block is reset using en7523_reset_assert() and
if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
 * Described in PCIe CEM specification revision 6.0.
/* De-assert reset signals */
writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
dev_err(pcie->dev,
"PCIe link down, current LTSSM state: %s (%#x)\n",
mtk_pcie_enable_msi(pcie);
/* Set PCIe translation windows */
resource_list_for_each_entry(entry, &host->windows) {
struct resource *res = entry->res;
cpu_addr = pci_pio_to_address(res->start);
cpu_addr = res->start;
pci_addr = res->start - entry->offset;
err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
return 0;
struct mtk_gen3_pcie *pcie = data->domain->host_data;
hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
msg->address_hi = upper_32_bits(msi_set->msg_addr);
msg->address_lo = lower_32_bits(msi_set->msg_addr);
msg->data = hwirq;
dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
hwirq, msg->address_hi, msg->address_lo, msg->data);
hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
writel_relaxed(BIT(hwirq), msi_set->base + PCIE_MSI_SET_STATUS_OFFSET);
struct mtk_gen3_pcie *pcie = data->domain->host_data;
hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
raw_spin_lock_irqsave(&pcie->irq_lock, flags);
val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
struct mtk_gen3_pcie *pcie = data->domain->host_data;
hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
raw_spin_lock_irqsave(&pcie->irq_lock, flags);
val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
struct mtk_gen3_pcie *pcie = domain->host_data;
mutex_lock(&pcie->lock);
hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
mutex_unlock(&pcie->lock);
if (hwirq < 0)
return -ENOSPC;
msi_set = &pcie->msi_sets[set_idx];
for (i = 0; i < nr_irqs; i++)
return 0;
struct mtk_gen3_pcie *pcie = domain->host_data;
mutex_lock(&pcie->lock);
bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
mutex_unlock(&pcie->lock);
struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
raw_spin_lock_irqsave(&pcie->irq_lock, flags);
val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
raw_spin_lock_irqsave(&pcie->irq_lock, flags);
val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
 * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt
 * until the corresponding de-assert message is received; hence that
struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
hwirq = data->hwirq + PCIE_INTX_SHIFT;
writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
irq_set_chip_data(irq, domain->host_data);
return 0;
static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
struct device *dev = pcie->dev;
struct device_node *intc_node, *node = dev->of_node;
raw_spin_lock_init(&pcie->irq_lock);
intc_node = of_get_child_by_name(node, "interrupt-controller");
dev_err(dev, "missing interrupt-controller node\n");
return -ENODEV;
pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
&intx_domain_ops, pcie);
if (!pcie->intx_domain) {
ret = -ENODEV;
mutex_init(&pcie->lock);
pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
&mtk_msi_bottom_domain_ops, pcie);
if (!pcie->msi_bottom_domain) {
ret = -ENODEV;
pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
pcie->msi_bottom_domain);
if (!pcie->msi_domain) {
ret = -ENODEV;
return 0;
irq_domain_remove(pcie->msi_bottom_domain);
irq_domain_remove(pcie->intx_domain);
static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
if (pcie->intx_domain)
irq_domain_remove(pcie->intx_domain);
if (pcie->msi_domain)
irq_domain_remove(pcie->msi_domain);
if (pcie->msi_bottom_domain)
irq_domain_remove(pcie->msi_bottom_domain);
irq_dispose_mapping(pcie->irq);
static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
msi_enable = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
msi_status = readl_relaxed(msi_set->base +
generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
generic_handle_domain_irq(pcie->intx_domain,
irq_bit - PCIE_INTX_SHIFT);
mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);
writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
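/*
 * The chained handler above demultiplexes PCIE_INT_STATUS_REG: bits at
 * PCIE_INTX_SHIFT are forwarded to the INTx domain, higher bits at
 * PCIE_MSI_SHIFT select an MSI set whose own status register is then
 * scanned, and each serviced top-level bit is written back to ack it.
 */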
static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
struct device *dev = pcie->dev;
err = mtk_pcie_init_irq_domains(pcie);
pcie->irq = platform_get_irq(pdev, 0);
if (pcie->irq < 0)
return pcie->irq;
irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);
return 0;
static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
int i, ret, num_resets = pcie->soc->phy_resets.num_resets;
struct device *dev = pcie->dev;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
return -EINVAL;
pcie->base = devm_ioremap_resource(dev, regs);
if (IS_ERR(pcie->base)) {
return PTR_ERR(pcie->base);
pcie->reg_base = regs->start;
for (i = 0; i < num_resets; i++)
pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets, pcie->phy_resets);
pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
if (IS_ERR(pcie->mac_reset)) {
ret = PTR_ERR(pcie->mac_reset);
if (ret != -EPROBE_DEFER)
pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
if (IS_ERR(pcie->phy)) {
ret = PTR_ERR(pcie->phy);
if (ret != -EPROBE_DEFER)
pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
if (pcie->num_clks < 0) {
return pcie->num_clks;
ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes);
if (ret == 0) {
if (num_lanes == 0 || num_lanes > 16 || (num_lanes != 1 && num_lanes % 2))
dev_warn(dev, "invalid num-lanes, using controller defaults\n");
pcie->num_lanes = num_lanes;
return 0;
static int mtk_pcie_en7581_power_up(struct mtk_gen3_pcie *pcie)
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
struct device *dev = pcie->dev;
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
pcie->phy_resets);
reset_control_assert(pcie->mac_reset);
 * hw to detect if a given address is accessible on PCIe controller.
pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node,
"mediatek,pbus-csr",
entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
return -ENODEV;
addr = entry->res->start - entry->offset;
regmap_write(pbus_regmap, args[0], lower_32_bits(addr));
size = lower_32_bits(resource_size(entry->res));
 * requires PHY initialization and power-on before PHY reset deassert.
err = phy_init(pcie->phy);
err = phy_power_on(pcie->phy);
err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
 * Wait for the time needed to complete the bulk de-assert above.
val = FIELD_PREP(PCIE_VAL_LN0_DOWNSTREAM, 0x47) |
FIELD_PREP(PCIE_VAL_LN1_DOWNSTREAM, 0x47) |
FIELD_PREP(PCIE_VAL_LN0_UPSTREAM, 0x41) |
FIELD_PREP(PCIE_VAL_LN1_UPSTREAM, 0x41);
writel_relaxed(val, pcie->base + PCIE_EQ_PRESET_01_REG);
FIELD_PREP(PCIE_K_PRESET_TO_USE_16G, 0x80) |
FIELD_PREP(PCIE_K_PRESET_TO_USE, 0x2) |
FIELD_PREP(PCIE_K_FINETUNE_MAX, 0xf);
writel_relaxed(val, pcie->base + PCIE_PIPE4_PIE8_REG);
err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
 * Airoha EN7581 performs PCIe reset via clk callbacks since it has a
 * complete the PCIe reset.
return 0;
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
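/*
 * Summary of the EN7581-specific ordering shown above: the PHY and MAC
 * resets are asserted first, the pbus CSR window is programmed so the SoC
 * can decode PCIe addresses, phy_init()/phy_power_on() must then complete
 * before the PHY resets are de-asserted, and only afterwards are the lane
 * equalization presets written and the bus clocks enabled (which also
 * perform the PCIe reset on this SoC).
 */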
static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
struct device *dev = pcie->dev;
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
pcie->phy_resets);
reset_control_assert(pcie->mac_reset);
err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
err = phy_init(pcie->phy);
err = phy_power_on(pcie->phy);
reset_control_deassert(pcie->mac_reset);
err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
return 0;
reset_control_assert(pcie->mac_reset);
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
pm_runtime_put_sync(pcie->dev);
pm_runtime_disable(pcie->dev);
reset_control_assert(pcie->mac_reset);
phy_power_off(pcie->phy);
phy_exit(pcie->phy);
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie)
val = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG);
return ret > 0 ? ret : -EINVAL;
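/*
 * "ret" here is presumably derived from the supported-speed field of
 * PCIE_BASE_CFG_REG (fls() of that field), so a positive value is the
 * highest PCIe generation the controller advertises and anything else is
 * reported as -EINVAL.
 */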
static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
err = mtk_pcie_parse_port(pcie);
reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets, pcie->phy_resets);
err = pcie->soc->power_up(pcie);
err = of_pci_get_max_link_speed(pcie->dev->of_node);
max_speed = mtk_pcie_get_controller_max_link_speed(pcie);
if (max_speed >= 0 && max_speed <= err) {
pcie->max_link_speed = err;
dev_info(pcie->dev,
max_speed, pcie->max_link_speed);
err = mtk_pcie_startup_port(pcie);
err = mtk_pcie_setup_irq(pcie);
return 0;
mtk_pcie_power_down(pcie);
struct device *dev = &pdev->dev;
struct mtk_gen3_pcie *pcie;
host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
return -ENOMEM;
pcie = pci_host_bridge_priv(host);
pcie->dev = dev;
pcie->soc = device_get_match_data(dev);
platform_set_drvdata(pdev, pcie);
err = mtk_pcie_setup(pcie);
host->ops = &mtk_pcie_ops;
host->sysdata = pcie;
mtk_pcie_irq_teardown(pcie);
mtk_pcie_power_down(pcie);
return 0;
struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
pci_stop_root_bus(host->bus);
pci_remove_root_bus(host->bus);
mtk_pcie_irq_teardown(pcie);
mtk_pcie_power_down(pcie);
static void mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
raw_spin_lock(&pcie->irq_lock);
pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
msi_set->saved_irq_state = readl_relaxed(msi_set->base +
raw_spin_unlock(&pcie->irq_lock);
static void mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
raw_spin_lock(&pcie->irq_lock);
writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);
for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
writel_relaxed(msi_set->saved_irq_state,
msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
raw_spin_unlock(&pcie->irq_lock);
static int mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);
return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
err = mtk_pcie_turn_off_link(pcie);
dev_err(pcie->dev, "cannot enter L2 state\n");
if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) {
val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
dev_dbg(pcie->dev, "entered L2 states successfully");
mtk_pcie_irq_save(pcie);
mtk_pcie_power_down(pcie);
return 0;
struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
err = pcie->soc->power_up(pcie);
err = mtk_pcie_startup_port(pcie);
mtk_pcie_power_down(pcie);
mtk_pcie_irq_restore(pcie);
return 0;
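/*
 * Suspend/resume flow visible above: suspend_noirq puts the link into L2 via
 * PCIE_ICMD_PM_REG and polls PCIE_LTSSM_STATUS_REG for the L2 idle state,
 * asserts PERST# unless the SoC sets SKIP_PCIE_RSTB, saves the interrupt
 * enable state and powers the port down; resume_noirq powers it back up,
 * re-runs mtk_pcie_startup_port() and restores the saved interrupt state.
 */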
.id[0] = "phy",
.id[0] = "phy-lane0",
.id[1] = "phy-lane1",
.id[2] = "phy-lane2",
{ .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
{ .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
.name = "mtk-pcie-gen3",
MODULE_DESCRIPTION("MediaTek Gen3 PCIe host controller driver");