Lines Matching +full:cap +full:- +full:get
1 // SPDX-License-Identifier: GPL-2.0
4 // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
10 #include <linux/pci-epc.h>
14 #include "pcie-cadence.h"
22 u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET; in cdns_pcie_get_fn_from_vfn() local
28 first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET); in cdns_pcie_get_fn_from_vfn()
29 stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE); in cdns_pcie_get_fn_from_vfn()
30 fn = fn + first_vf_offset + ((vfn - 1) * stride); in cdns_pcie_get_fn_from_vfn()
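The three fragments above make up cdns_pcie_get_fn_from_vfn(): a virtual function is addressed by taking the parent physical function number and adding the SR-IOV First VF Offset plus (vfn - 1) times the VF Stride read from that PF's SR-IOV capability. A minimal standalone sketch of the same arithmetic, with the capability reads replaced by plain parameters (the helper name and parameters are illustrative, not the driver's):

#include <stdint.h>

/*
 * Hypothetical helper mirroring the mapping above: vfn is 1-based and
 * vfn == 0 is taken to mean "the physical function itself".
 * first_vf_offset and vf_stride stand in for the PCI_SRIOV_VF_OFFSET /
 * PCI_SRIOV_VF_STRIDE fields read from the PF's SR-IOV capability.
 */
static uint8_t vf_to_fn(uint8_t fn, uint8_t vfn,
                        uint16_t first_vf_offset, uint16_t vf_stride)
{
        if (!vfn)
                return fn;
        return fn + first_vf_offset + (vfn - 1) * vf_stride;
}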
39 u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET; in cdns_pcie_ep_write_header() local
40 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_write_header()
44 dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n"); in cdns_pcie_ep_write_header()
45 return -EINVAL; in cdns_pcie_ep_write_header()
47 reg = cap + PCI_SRIOV_VF_DID; in cdns_pcie_ep_write_header()
48 cdns_pcie_ep_fn_writew(pcie, fn, reg, hdr->deviceid); in cdns_pcie_ep_write_header()
52 cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid); in cdns_pcie_ep_write_header()
53 cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid); in cdns_pcie_ep_write_header()
54 cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code); in cdns_pcie_ep_write_header()
56 hdr->subclass_code | hdr->baseclass_code << 8); in cdns_pcie_ep_write_header()
58 hdr->cache_line_size); in cdns_pcie_ep_write_header()
59 cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id); in cdns_pcie_ep_write_header()
60 cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin); in cdns_pcie_ep_write_header()
68 u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) | in cdns_pcie_ep_write_header()
69 CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id); in cdns_pcie_ep_write_header()
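cdns_pcie_ep_write_header() programs the standard configuration header for one physical function; as the last two fragments show, the Vendor ID and Subsystem Vendor ID are not written into the function's own config space but packed into the controller-level CDNS_PCIE_LM_ID register. On the caller side of this sits the generic EPC API: an endpoint function driver fills a struct pci_epf_header and passes it to pci_epc_write_header(). A hedged sketch of that caller side; the ID values are placeholders, and the vfunc_no argument mentioned in the comment assumes a kernel whose EPC core has virtual-function support:

#include <linux/pci.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Placeholder header an EPF driver might pass down; all IDs are fake. */
static const struct pci_epf_header example_hdr = {
        .vendorid       = 0x1234,               /* placeholder vendor ID */
        .deviceid       = 0x5678,               /* placeholder device ID */
        .baseclass_code = PCI_BASE_CLASS_MEMORY,
        .interrupt_pin  = PCI_INTERRUPT_INTA,
};

/*
 * In an EPF driver's ->bind() path, roughly:
 *
 *      ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no,
 *                                 &example_hdr);
 *
 * which eventually lands in cdns_pcie_ep_write_header() above.
 */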
81 struct cdns_pcie_epf *epf = &ep->epf[fn]; in cdns_pcie_ep_set_bar()
82 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_set_bar()
83 dma_addr_t bar_phys = epf_bar->phys_addr; in cdns_pcie_ep_set_bar()
84 enum pci_barno bar = epf_bar->barno; in cdns_pcie_ep_set_bar()
85 int flags = epf_bar->flags; in cdns_pcie_ep_set_bar()
90 sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE); in cdns_pcie_ep_set_bar()
95 sz = 1ULL << fls64(sz - 1); in cdns_pcie_ep_set_bar()
96 aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */ in cdns_pcie_ep_set_bar()
105 return -EINVAL; in cdns_pcie_ep_set_bar()
124 b = (bar < BAR_4) ? bar : bar - BAR_4; in cdns_pcie_ep_set_bar()
142 epf = &epf->epf[vfn - 1]; in cdns_pcie_ep_set_bar()
143 epf->epf_bar[bar] = epf_bar; in cdns_pcie_ep_set_bar()
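The cdns_pcie_ep_set_bar() fragments show how a requested BAR size becomes the controller's aperture encoding: the size is clamped to the minimum aperture, rounded up to a power of two with fls64(), and ilog2(size) - 7 then gives the field value (128B -> 0, 256B -> 1, and so on, per the comment above). A standalone sketch of the same math using compiler builtins in place of the kernel helpers; the 128-byte minimum mirrors that comment, and sizes are assumed to stay well below 2^63:

#include <stdint.h>
#include <stdio.h>

/* Encode a BAR size the way the fragments above do. */
static unsigned int bar_aperture(uint64_t size)
{
        uint64_t sz = size < 128 ? 128 : size;

        /* round up to the next power of two (fls64(sz - 1) equivalent) */
        sz = 1ULL << (64 - __builtin_clzll(sz - 1));
        /* ilog2(sz) - 7: 128B -> 0, 256B -> 1, 512B -> 2, ... */
        return (unsigned int)(63 - __builtin_clzll(sz)) - 7;
}

int main(void)
{
        printf("%u %u %u\n", bar_aperture(100), bar_aperture(256),
               bar_aperture(4096));            /* prints: 0 1 5 */
        return 0;
}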
152 struct cdns_pcie_epf *epf = &ep->epf[fn]; in cdns_pcie_ep_clear_bar()
153 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_clear_bar()
154 enum pci_barno bar = epf_bar->barno; in cdns_pcie_ep_clear_bar()
161 b = (bar < BAR_4) ? bar : bar - BAR_4; in cdns_pcie_ep_clear_bar()
177 epf = &epf->epf[vfn - 1]; in cdns_pcie_ep_clear_bar()
178 epf->epf_bar[bar] = NULL; in cdns_pcie_ep_clear_bar()
185 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_map_addr()
188 r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG); in cdns_pcie_ep_map_addr()
189 if (r >= ep->max_regions - 1) { in cdns_pcie_ep_map_addr()
190 dev_err(&epc->dev, "no free outbound region\n"); in cdns_pcie_ep_map_addr()
191 return -EINVAL; in cdns_pcie_ep_map_addr()
197 set_bit(r, &ep->ob_region_map); in cdns_pcie_ep_map_addr()
198 ep->ob_addr[r] = addr; in cdns_pcie_ep_map_addr()
207 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_unmap_addr()
210 for (r = 0; r < ep->max_regions - 1; r++) in cdns_pcie_ep_unmap_addr()
211 if (ep->ob_addr[r] == addr) in cdns_pcie_ep_unmap_addr()
214 if (r == ep->max_regions - 1) in cdns_pcie_ep_unmap_addr()
219 ep->ob_addr[r] = 0; in cdns_pcie_ep_unmap_addr()
220 clear_bit(r, &ep->ob_region_map); in cdns_pcie_ep_unmap_addr()
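Together, cdns_pcie_ep_map_addr() and cdns_pcie_ep_unmap_addr() treat the outbound address-translation regions as a small pool: a bitmap records which regions are busy, ep->ob_addr[] remembers which address each one maps, and unmapping is a linear search for that address. A standalone sketch of the same bookkeeping, with hypothetical names; region 0 is pre-reserved here, mirroring the set_bit(0, ...) in the setup fragments near the end of this listing:

#include <stdint.h>

#define MAX_OB_REGIONS 32u

/* Illustrative pool mirroring ob_region_map / ob_addr[] above. */
struct ob_pool {
        uint32_t busy;                          /* one bit per region */
        uint64_t cpu_addr[MAX_OB_REGIONS];
};

static void ob_pool_init(struct ob_pool *p)
{
        *p = (struct ob_pool){ .busy = 1u };    /* reserve region 0 (IRQ window) */
}

static int ob_pool_map(struct ob_pool *p, unsigned int nregions, uint64_t cpu_addr)
{
        unsigned int r;

        for (r = 0; r < nregions; r++)
                if (!(p->busy & (1u << r)))
                        break;
        if (r >= nregions)
                return -1;                      /* no free outbound region */
        p->busy |= 1u << r;
        p->cpu_addr[r] = cpu_addr;
        return (int)r;
}

static void ob_pool_unmap(struct ob_pool *p, unsigned int nregions, uint64_t cpu_addr)
{
        unsigned int r;

        for (r = 0; r < nregions; r++)
                if ((p->busy & (1u << r)) && p->cpu_addr[r] == cpu_addr)
                        break;
        if (r >= nregions)
                return;                         /* nothing mapped at this address */
        p->cpu_addr[r] = 0;
        p->busy &= ~(1u << r);
}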
226 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_set_msi()
227 u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; in cdns_pcie_ep_set_msi() local
236 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); in cdns_pcie_ep_set_msi()
240 cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags); in cdns_pcie_ep_set_msi()
248 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_get_msi()
249 u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; in cdns_pcie_ep_get_msi() local
255 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); in cdns_pcie_ep_get_msi()
257 return -EINVAL; in cdns_pcie_ep_get_msi()
260 * Get the Multiple Message Enable bitfield from the Message Control in cdns_pcie_ep_get_msi()
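cdns_pcie_ep_set_msi() and cdns_pcie_ep_get_msi() work directly on the MSI capability's Message Control word: set_msi programs how many vectors the function advertises, and get_msi reads back the Multiple Message Enable field the host actually granted. The usable vector count is 1 << MME, and it only means anything once the host has set the MSI Enable bit. A standalone sketch of that decode (register constants copied from the uapi pci_regs.h values):

#include <stdint.h>

#define PCI_MSI_FLAGS_ENABLE 0x0001     /* MSI Enable */
#define PCI_MSI_FLAGS_QSIZE  0x0070     /* Multiple Message Enable (log2 count) */

/* Number of MSI vectors the host has enabled, 0 if MSI is off. */
static int msi_vectors_enabled(uint16_t msg_ctrl)
{
        if (!(msg_ctrl & PCI_MSI_FLAGS_ENABLE))
                return 0;
        return 1 << ((msg_ctrl & PCI_MSI_FLAGS_QSIZE) >> 4);
}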
271 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_get_msix()
272 u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; in cdns_pcie_ep_get_msix() local
277 reg = cap + PCI_MSIX_FLAGS; in cdns_pcie_ep_get_msix()
280 return -EINVAL; in cdns_pcie_ep_get_msix()
292 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_set_msix()
293 u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; in cdns_pcie_ep_set_msix() local
298 reg = cap + PCI_MSIX_FLAGS; in cdns_pcie_ep_set_msix()
305 reg = cap + PCI_MSIX_TABLE; in cdns_pcie_ep_set_msix()
310 reg = cap + PCI_MSIX_PBA; in cdns_pcie_ep_set_msix()
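The MSI-X helpers follow the same pattern for the MSI-X capability: get_msix reads the table size back out of Message Control, while set_msix programs the size plus the Table and PBA locations, each of which packs a BAR indicator (BIR) into its low three bits alongside the offset. A standalone sketch of how those fields decode (constants copied from pci_regs.h):

#include <stdint.h>

#define PCI_MSIX_FLAGS_QSIZE   0x07FF           /* table size - 1 */
#define PCI_MSIX_TABLE_BIR     0x00000007u      /* BAR holding the table */
#define PCI_MSIX_TABLE_OFFSET  0xFFFFFFF8u      /* offset within that BAR */

static unsigned int msix_table_size(uint16_t msg_ctrl)
{
        return (msg_ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

static void msix_table_location(uint32_t table_reg,
                                uint32_t *offset, unsigned int *bir)
{
        *bir    = table_reg & PCI_MSIX_TABLE_BIR;
        *offset = table_reg & PCI_MSIX_TABLE_OFFSET;
}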
320 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_assert_intx()
329 if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || in cdns_pcie_ep_assert_intx()
330 ep->irq_pci_fn != fn)) { in cdns_pcie_ep_assert_intx()
333 ep->irq_phys_addr); in cdns_pcie_ep_assert_intx()
334 ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; in cdns_pcie_ep_assert_intx()
335 ep->irq_pci_fn = fn; in cdns_pcie_ep_assert_intx()
339 ep->irq_pending |= BIT(intx); in cdns_pcie_ep_assert_intx()
342 ep->irq_pending &= ~BIT(intx); in cdns_pcie_ep_assert_intx()
346 spin_lock_irqsave(&ep->lock, flags); in cdns_pcie_ep_assert_intx()
348 if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) { in cdns_pcie_ep_assert_intx()
352 spin_unlock_irqrestore(&ep->lock, flags); in cdns_pcie_ep_assert_intx()
356 writel(0, ep->irq_cpu_addr + offset); in cdns_pcie_ep_assert_intx()
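cdns_pcie_ep_assert_intx() keeps a software bitmask of which legacy INTx lines are currently asserted and makes sure the Interrupt Status bit in the function's Status register flips exactly when "any line pending" changes; the final writel() into the mapped legacy-IRQ window is what actually signals the host. A small standalone sketch of the pending/status bookkeeping, with the register access and locking left out:

#include <stdbool.h>
#include <stdint.h>

#define PCI_STATUS_INTERRUPT 0x08       /* Interrupt Status bit */

/* irq_pending mirrors the four INTx lines; status shadows PCI_STATUS. */
struct intx_state {
        uint8_t  irq_pending;
        uint16_t status;
};

static void intx_assert(struct intx_state *s, unsigned int intx, bool is_asserted)
{
        if (is_asserted)
                s->irq_pending |= 1u << intx;
        else
                s->irq_pending &= ~(1u << intx);

        /* Toggle Interrupt Status only when it no longer matches the
         * "any line pending" condition, as in the XOR test above. */
        if (((s->status & PCI_STATUS_INTERRUPT) != 0) ^ (s->irq_pending != 0))
                s->status ^= PCI_STATUS_INTERRUPT;
}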
364 cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND); in cdns_pcie_ep_send_intx_irq()
366 return -EINVAL; in cdns_pcie_ep_send_intx_irq()
380 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_send_msi_irq()
381 u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; in cdns_pcie_ep_send_msi_irq() local
389 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); in cdns_pcie_ep_send_msi_irq()
391 return -EINVAL; in cdns_pcie_ep_send_msi_irq()
393 /* Get the number of enabled MSIs */ in cdns_pcie_ep_send_msi_irq()
397 return -EINVAL; in cdns_pcie_ep_send_msi_irq()
400 data_mask = msi_count - 1; in cdns_pcie_ep_send_msi_irq()
401 data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64); in cdns_pcie_ep_send_msi_irq()
402 data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); in cdns_pcie_ep_send_msi_irq()
404 /* Get the PCI address where to write the data into. */ in cdns_pcie_ep_send_msi_irq()
405 pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI); in cdns_pcie_ep_send_msi_irq()
407 pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO); in cdns_pcie_ep_send_msi_irq()
411 if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || in cdns_pcie_ep_send_msi_irq()
412 ep->irq_pci_fn != fn)) { in cdns_pcie_ep_send_msi_irq()
416 ep->irq_phys_addr, in cdns_pcie_ep_send_msi_irq()
419 ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); in cdns_pcie_ep_send_msi_irq()
420 ep->irq_pci_fn = fn; in cdns_pcie_ep_send_msi_irq()
422 writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); in cdns_pcie_ep_send_msi_irq()
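cdns_pcie_ep_send_msi_irq() builds the doorbell write from the host-programmed MSI capability: the low log2(vector count) bits of the message data select the vector, and the 64-bit message address is split into an aligned window (mapped through an outbound region when it changes) plus a small offset for the actual write. A standalone sketch of that math; the 0xff alignment mask mirrors the window granularity the driver appears to use for its IRQ region and should be treated as an assumption:

#include <stdint.h>

struct msi_doorbell {
        uint64_t window_base;   /* aligned part, mapped via an outbound region */
        uint32_t window_off;    /* offset of the write inside that window */
        uint16_t data;          /* message data for this vector */
};

static struct msi_doorbell msi_doorbell_for(uint64_t msg_addr, uint16_t msg_data,
                                            unsigned int msi_count,
                                            unsigned int interrupt_num)
{
        const uint64_t pci_addr_mask = 0xff;    /* assumed window granularity */
        uint16_t data_mask = (uint16_t)(msi_count - 1);
        struct msi_doorbell db;

        /* keep the host's upper data bits, substitute the vector index */
        db.data        = (uint16_t)((msg_data & ~data_mask) |
                                    ((interrupt_num - 1) & data_mask));
        db.window_base = msg_addr & ~pci_addr_mask;
        db.window_off  = (uint32_t)(msg_addr & pci_addr_mask);
        return db;
}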
433 u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; in cdns_pcie_ep_map_msi_irq() local
434 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_map_msi_irq()
444 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); in cdns_pcie_ep_map_msi_irq()
446 return -EINVAL; in cdns_pcie_ep_map_msi_irq()
448 /* Get the number of enabled MSIs */ in cdns_pcie_ep_map_msi_irq()
452 return -EINVAL; in cdns_pcie_ep_map_msi_irq()
455 data_mask = msi_count - 1; in cdns_pcie_ep_map_msi_irq()
456 data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64); in cdns_pcie_ep_map_msi_irq()
459 /* Get the PCI address where to write the data into. */ in cdns_pcie_ep_map_msi_irq()
460 pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI); in cdns_pcie_ep_map_msi_irq()
462 pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO); in cdns_pcie_ep_map_msi_irq()
483 u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; in cdns_pcie_ep_send_msix_irq() local
485 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_send_msix_irq()
493 epf = &ep->epf[fn]; in cdns_pcie_ep_send_msix_irq()
495 epf = &epf->epf[vfn - 1]; in cdns_pcie_ep_send_msix_irq()
499 /* Check whether the MSI-X feature has been enabled by the PCI host. */ in cdns_pcie_ep_send_msix_irq()
500 flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS); in cdns_pcie_ep_send_msix_irq()
502 return -EINVAL; in cdns_pcie_ep_send_msix_irq()
504 reg = cap + PCI_MSIX_TABLE; in cdns_pcie_ep_send_msix_irq()
509 msix_tbl = epf->epf_bar[bir]->addr + tbl_offset; in cdns_pcie_ep_send_msix_irq()
510 msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; in cdns_pcie_ep_send_msix_irq()
511 msg_data = msix_tbl[(interrupt_num - 1)].msg_data; in cdns_pcie_ep_send_msix_irq()
514 if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) || in cdns_pcie_ep_send_msix_irq()
515 ep->irq_pci_fn != fn) { in cdns_pcie_ep_send_msix_irq()
519 ep->irq_phys_addr, in cdns_pcie_ep_send_msix_irq()
522 ep->irq_pci_addr = (msg_addr & ~pci_addr_mask); in cdns_pcie_ep_send_msix_irq()
523 ep->irq_pci_fn = fn; in cdns_pcie_ep_send_msix_irq()
525 writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask)); in cdns_pcie_ep_send_msix_irq()
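cdns_pcie_ep_send_msix_irq() resolves the interrupt the long way round: it reads the Table Offset/BIR register, finds the BAR the endpoint itself backs the MSI-X table with, and pulls the message address and data straight out of entry (interrupt_num - 1) before writing the doorbell through the outbound IRQ window. A standalone sketch of the table-entry lookup; the 16-byte entry layout mirrors struct pci_epf_msix_tbl in the EPF core, and only pointer arithmetic is shown:

#include <stdint.h>

/* One MSI-X table entry as laid out in the endpoint's BAR. */
struct msix_tbl_entry {
        uint64_t msg_addr;
        uint32_t msg_data;
        uint32_t vector_ctrl;
};

static const struct msix_tbl_entry *
msix_entry(const void *bar_virt, uint32_t tbl_offset, unsigned int vector)
{
        const struct msix_tbl_entry *tbl =
                (const struct msix_tbl_entry *)((const char *)bar_virt + tbl_offset);

        return &tbl[vector - 1];        /* vectors are 1-based in the EPC API */
}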
534 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_raise_irq()
535 struct device *dev = pcie->dev; in cdns_pcie_ep_raise_irq()
541 return -EINVAL; in cdns_pcie_ep_raise_irq()
555 return -EINVAL; in cdns_pcie_ep_raise_irq()
561 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_start()
562 struct device *dev = pcie->dev; in cdns_pcie_ep_start()
563 int max_epfs = sizeof(epc->function_num_map) * 8; in cdns_pcie_ep_start()
571 cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, epc->function_num_map); in cdns_pcie_ep_start()
578 last_fn = find_last_bit(&epc->function_num_map, BITS_PER_LONG); in cdns_pcie_ep_start()
584 if (ep->quirk_disable_flr) { in cdns_pcie_ep_start()
586 if (!(epc->function_num_map & BIT(epf))) in cdns_pcie_ep_start()
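cdns_pcie_ep_start() tells the controller which physical functions exist by writing the EPC core's function_num_map bitmap into CDNS_PCIE_LM_EP_FUNC_CFG, then walks the same bitmap for per-function fixups such as the FLR quirk, skipping functions that were never bound. A trivial standalone sketch of that walk, with printf() standing in for the per-function register pokes:

#include <stdio.h>

static void for_each_configured_fn(unsigned long function_num_map,
                                   unsigned int max_epfs)
{
        unsigned int fn;

        for (fn = 0; fn < max_epfs; fn++) {
                if (!(function_num_map & (1UL << fn)))
                        continue;       /* function not configured */
                printf("configure function %u\n", fn);
        }
}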
650 struct device *dev = ep->pcie.dev; in cdns_pcie_ep_setup()
652 struct device_node *np = dev->of_node; in cdns_pcie_ep_setup()
653 struct cdns_pcie *pcie = &ep->pcie; in cdns_pcie_ep_setup()
660 pcie->is_rc = false; in cdns_pcie_ep_setup()
662 pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg"); in cdns_pcie_ep_setup()
663 if (IS_ERR(pcie->reg_base)) { in cdns_pcie_ep_setup()
665 return PTR_ERR(pcie->reg_base); in cdns_pcie_ep_setup()
671 return -EINVAL; in cdns_pcie_ep_setup()
673 pcie->mem_res = res; in cdns_pcie_ep_setup()
675 ep->max_regions = CDNS_PCIE_MAX_OB; in cdns_pcie_ep_setup()
676 of_property_read_u32(np, "cdns,max-outbound-regions", &ep->max_regions); in cdns_pcie_ep_setup()
678 ep->ob_addr = devm_kcalloc(dev, in cdns_pcie_ep_setup()
679 ep->max_regions, sizeof(*ep->ob_addr), in cdns_pcie_ep_setup()
681 if (!ep->ob_addr) in cdns_pcie_ep_setup()
682 return -ENOMEM; in cdns_pcie_ep_setup()
695 if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) in cdns_pcie_ep_setup()
696 epc->max_functions = 1; in cdns_pcie_ep_setup()
698 ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf), in cdns_pcie_ep_setup()
700 if (!ep->epf) in cdns_pcie_ep_setup()
701 return -ENOMEM; in cdns_pcie_ep_setup()
703 epc->max_vfs = devm_kcalloc(dev, epc->max_functions, in cdns_pcie_ep_setup()
704 sizeof(*epc->max_vfs), GFP_KERNEL); in cdns_pcie_ep_setup()
705 if (!epc->max_vfs) in cdns_pcie_ep_setup()
706 return -ENOMEM; in cdns_pcie_ep_setup()
708 ret = of_property_read_u8_array(np, "max-virtual-functions", in cdns_pcie_ep_setup()
709 epc->max_vfs, epc->max_functions); in cdns_pcie_ep_setup()
711 for (i = 0; i < epc->max_functions; i++) { in cdns_pcie_ep_setup()
712 epf = &ep->epf[i]; in cdns_pcie_ep_setup()
713 if (epc->max_vfs[i] == 0) in cdns_pcie_ep_setup()
715 epf->epf = devm_kcalloc(dev, epc->max_vfs[i], in cdns_pcie_ep_setup()
716 sizeof(*ep->epf), GFP_KERNEL); in cdns_pcie_ep_setup()
717 if (!epf->epf) in cdns_pcie_ep_setup()
718 return -ENOMEM; in cdns_pcie_ep_setup()
722 ret = pci_epc_mem_init(epc, pcie->mem_res->start, in cdns_pcie_ep_setup()
723 resource_size(pcie->mem_res), PAGE_SIZE); in cdns_pcie_ep_setup()
729 ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, in cdns_pcie_ep_setup()
731 if (!ep->irq_cpu_addr) { in cdns_pcie_ep_setup()
733 ret = -ENOMEM; in cdns_pcie_ep_setup()
736 ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; in cdns_pcie_ep_setup()
738 set_bit(0, &ep->ob_region_map); in cdns_pcie_ep_setup()
740 if (ep->quirk_detect_quiet_flag) in cdns_pcie_ep_setup()
741 cdns_pcie_detect_quiet_min_delay_set(&ep->pcie); in cdns_pcie_ep_setup()
743 spin_lock_init(&ep->lock); in cdns_pcie_ep_setup()
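The setup fragments allocate one bookkeeping slot per physical function (ep->epf) and, for any PF whose max-virtual-functions entry is non-zero, a nested array of per-VF slots; this nested layout is what the epf->epf[vfn - 1] indexing earlier in the listing walks into. A standalone sketch of that shape, with hypothetical names and plain calloc() in place of devm_kcalloc():

#include <stdint.h>
#include <stdlib.h>

/* Illustrative per-function slot; the driver's real type holds more state. */
struct epf_slot {
        void            *epf_bar[6];    /* per-BAR bookkeeping */
        struct epf_slot *epf;           /* per-VF slots, NULL without SR-IOV */
};

static struct epf_slot *alloc_epf_table(unsigned int max_functions,
                                        const uint8_t *max_vfs)
{
        struct epf_slot *table = calloc(max_functions, sizeof(*table));
        unsigned int i;

        if (!table)
                return NULL;
        for (i = 0; i < max_functions; i++) {
                if (!max_vfs[i])
                        continue;       /* this PF exposes no VFs */
                table[i].epf = calloc(max_vfs[i], sizeof(*table));
                if (!table[i].epf) {
                        while (i--)
                                free(table[i].epf);
                        free(table);
                        return NULL;
                }
        }
        return table;
}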