Lines Matching +full:dll +full:- +full:config (all hits below are in the octeon_mgmt OCTEON management-port Ethernet driver)
6 * Copyright (C) 2009-2012 Cavium, Inc
10 #include <linux/dma-mapping.h>
27 #include <asm/octeon/cvmx-mixx-defs.h>
28 #include <asm/octeon/cvmx-agl-defs.h>
162 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_set_rx_irq()
163 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); in octeon_mgmt_set_rx_irq()
165 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); in octeon_mgmt_set_rx_irq()
166 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_set_rx_irq()
174 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_set_tx_irq()
175 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); in octeon_mgmt_set_tx_irq()
177 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); in octeon_mgmt_set_tx_irq()
178 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_set_tx_irq()
203 return ring_size - 8; in ring_max_fill()
215 while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { in octeon_mgmt_rx_fill_ring()
221 size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN; in octeon_mgmt_rx_fill_ring()
227 __skb_queue_tail(&p->rx_list, skb); in octeon_mgmt_rx_fill_ring()
231 re.s.addr = dma_map_single(p->dev, skb->data, in octeon_mgmt_rx_fill_ring()
236 p->rx_ring[p->rx_next_fill] = re.d64; in octeon_mgmt_rx_fill_ring()
242 dma_sync_single_for_device(p->dev, p->rx_ring_handle, in octeon_mgmt_rx_fill_ring()
245 p->rx_next_fill = in octeon_mgmt_rx_fill_ring()
246 (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; in octeon_mgmt_rx_fill_ring()
247 p->rx_current_fill++; in octeon_mgmt_rx_fill_ring()
249 cvmx_write_csr(p->mix + MIX_IRING2, 1); in octeon_mgmt_rx_fill_ring()
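The hits above are from octeon_mgmt_rx_fill_ring(): the receive ring is topped up until rx_current_fill reaches ring_max_fill() (the ring size minus 8, per the ring_max_fill() hit above), the fill index wraps modulo OCTEON_MGMT_RX_RING_SIZE, and each freshly mapped buffer is announced to the MIX block by writing 1 to MIX_IRING2. Below is a minimal userspace sketch of just that bookkeeping; the skb allocation and DMA mapping are reduced to comments and the ring size is illustrative, not the driver's constant.

#include <stdio.h>

#define RX_RING_SIZE 512                     /* illustrative; the driver has its own constant */

static int ring_max_fill(int ring_size)
{
	return ring_size - 8;                /* keep 8 slots of slack, as in the driver */
}

int main(void)
{
	int rx_next_fill = 0;                /* next slot to hand to the "hardware" */
	int rx_current_fill = 0;             /* entries currently owned by the "hardware" */
	int added = 0;

	while (rx_current_fill < ring_max_fill(RX_RING_SIZE)) {
		/* the driver allocates an skb sized mtu + headroom, dma_map_single()s it
		 * and stores the descriptor in rx_ring[rx_next_fill] at this point */
		rx_next_fill = (rx_next_fill + 1) % RX_RING_SIZE;
		rx_current_fill++;
		added++;
		/* driver: cvmx_write_csr(mix + MIX_IRING2, 1) publishes this one entry */
	}

	printf("filled %d of %d slots, next free slot %d\n",
	       added, RX_RING_SIZE, rx_next_fill);
	return 0;
}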
261 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); in octeon_mgmt_clean_tx_buffers()
263 spin_lock_irqsave(&p->tx_list.lock, flags); in octeon_mgmt_clean_tx_buffers()
265 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); in octeon_mgmt_clean_tx_buffers()
268 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_clean_tx_buffers()
272 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, in octeon_mgmt_clean_tx_buffers()
276 re.d64 = p->tx_ring[p->tx_next_clean]; in octeon_mgmt_clean_tx_buffers()
277 p->tx_next_clean = in octeon_mgmt_clean_tx_buffers()
278 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; in octeon_mgmt_clean_tx_buffers()
279 skb = __skb_dequeue(&p->tx_list); in octeon_mgmt_clean_tx_buffers()
285 cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64); in octeon_mgmt_clean_tx_buffers()
286 p->tx_current_fill--; in octeon_mgmt_clean_tx_buffers()
288 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_clean_tx_buffers()
290 dma_unmap_single(p->dev, re.s.addr, re.s.len, in octeon_mgmt_clean_tx_buffers()
300 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); in octeon_mgmt_clean_tx_buffers()
302 cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); in octeon_mgmt_clean_tx_buffers()
311 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); in octeon_mgmt_clean_tx_buffers()
314 if (cleaned && netif_queue_stopped(p->netdev)) in octeon_mgmt_clean_tx_buffers()
315 netif_wake_queue(p->netdev); in octeon_mgmt_clean_tx_buffers()
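octeon_mgmt_clean_tx_buffers() shows the count-register handshake with the MIX block: MIX_ORCNT says how many TX descriptors the hardware has finished, the driver unmaps and frees one skb per finished descriptor, and acknowledges each one by writing 1 back to MIX_ORCNT before re-reading the count. The sketch below models that loop with a stub register that simply subtracts whatever is written; the helper names and the pretend completion count are made up for illustration.

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_orcnt;                  /* stands in for the MIX_ORCNT count register */

static uint64_t csr_read(void)    { return fake_orcnt; }
static void csr_write(uint64_t v) { fake_orcnt -= v; }   /* writing N acknowledges N buffers */

int main(void)
{
	unsigned int cleaned = 0;

	fake_orcnt = 3;                      /* pretend the hardware finished 3 packets */

	while (csr_read() > 0) {
		/* the driver handles one descriptor here: advance tx_next_clean,
		 * __skb_dequeue() the matching skb, dma_unmap_single() and free it */
		cleaned++;
		csr_write(1);                /* acknowledge one buffer back to the hardware */
	}

	printf("cleaned %u tx descriptors\n", cleaned);
	/* driver: if (cleaned && netif_queue_stopped(netdev)) netif_wake_queue(netdev); */
	return 0;
}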
332 drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP); in octeon_mgmt_update_rx_stats()
333 bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD); in octeon_mgmt_update_rx_stats()
337 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_update_rx_stats()
338 netdev->stats.rx_errors += bad; in octeon_mgmt_update_rx_stats()
339 netdev->stats.rx_dropped += drop; in octeon_mgmt_update_rx_stats()
340 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_update_rx_stats()
353 s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0); in octeon_mgmt_update_tx_stats()
354 s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1); in octeon_mgmt_update_tx_stats()
358 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_update_tx_stats()
359 netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol; in octeon_mgmt_update_tx_stats()
360 netdev->stats.collisions += s1.s.scol + s1.s.mcol; in octeon_mgmt_update_tx_stats()
361 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_update_tx_stats()
374 dma_sync_single_for_cpu(p->dev, p->rx_ring_handle, in octeon_mgmt_dequeue_rx_buffer()
378 re.d64 = p->rx_ring[p->rx_next]; in octeon_mgmt_dequeue_rx_buffer()
379 p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE; in octeon_mgmt_dequeue_rx_buffer()
380 p->rx_current_fill--; in octeon_mgmt_dequeue_rx_buffer()
381 *pskb = __skb_dequeue(&p->rx_list); in octeon_mgmt_dequeue_rx_buffer()
383 dma_unmap_single(p->dev, re.s.addr, in octeon_mgmt_dequeue_rx_buffer()
393 struct net_device *netdev = p->netdev; in octeon_mgmt_receive_one()
409 if (p->has_rx_tstamp) { in octeon_mgmt_receive_one()
411 u64 ns = *(u64 *)skb->data; in octeon_mgmt_receive_one()
414 ts->hwtstamp = ns_to_ktime(ns); in octeon_mgmt_receive_one()
417 skb->protocol = eth_type_trans(skb, netdev); in octeon_mgmt_receive_one()
418 netdev->stats.rx_packets++; in octeon_mgmt_receive_one()
419 netdev->stats.rx_bytes += skb->len; in octeon_mgmt_receive_one()
428 * non-split case. in octeon_mgmt_receive_one()
437 skb_new = skb_copy_expand(skb, 0, skb2->len, in octeon_mgmt_receive_one()
442 skb2->len)) in octeon_mgmt_receive_one()
444 skb_put(skb_new, skb2->len); in octeon_mgmt_receive_one()
466 netdev->stats.rx_errors++; in octeon_mgmt_receive_one()
472 cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); in octeon_mgmt_receive_one()
482 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); in octeon_mgmt_receive_packets()
490 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); in octeon_mgmt_receive_packets()
493 octeon_mgmt_rx_fill_ring(p->netdev); in octeon_mgmt_receive_packets()
501 struct net_device *netdev = p->netdev; in octeon_mgmt_napi_poll()
524 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_reset_hw()
526 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_reset_hw()
529 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_reset_hw()
530 cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_reset_hw()
533 mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); in octeon_mgmt_reset_hw()
535 dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", in octeon_mgmt_reset_hw()
540 dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", in octeon_mgmt_reset_hw()
556 cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index)); in octeon_mgmt_cam_state_add()
557 cs->cam_mask |= (1ULL << cs->cam_index); in octeon_mgmt_cam_state_add()
558 cs->cam_index++; in octeon_mgmt_cam_state_add()
568 unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ in octeon_mgmt_set_rx_filtering()
569 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ in octeon_mgmt_set_rx_filtering()
576 if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) { in octeon_mgmt_set_rx_filtering()
583 available_cam_entries = 7 - netdev->uc.count; in octeon_mgmt_set_rx_filtering()
586 if (netdev->flags & IFF_MULTICAST) { in octeon_mgmt_set_rx_filtering()
587 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) || in octeon_mgmt_set_rx_filtering()
589 multicast_mode = 2; /* 2 - Accept all multicast. */ in octeon_mgmt_set_rx_filtering()
591 multicast_mode = 0; /* 0 - Use CAM. */ in octeon_mgmt_set_rx_filtering()
596 octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); in octeon_mgmt_set_rx_filtering()
598 octeon_mgmt_cam_state_add(&cam_state, ha->addr); in octeon_mgmt_set_rx_filtering()
602 octeon_mgmt_cam_state_add(&cam_state, ha->addr); in octeon_mgmt_set_rx_filtering()
605 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_set_rx_filtering()
608 agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_set_rx_filtering()
611 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); in octeon_mgmt_set_rx_filtering()
618 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64); in octeon_mgmt_set_rx_filtering()
620 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]); in octeon_mgmt_set_rx_filtering()
621 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]); in octeon_mgmt_set_rx_filtering()
622 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]); in octeon_mgmt_set_rx_filtering()
623 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]); in octeon_mgmt_set_rx_filtering()
624 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]); in octeon_mgmt_set_rx_filtering()
625 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]); in octeon_mgmt_set_rx_filtering()
626 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask); in octeon_mgmt_set_rx_filtering()
630 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); in octeon_mgmt_set_rx_filtering()
632 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_set_rx_filtering()
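The CAM lines above (octeon_mgmt_cam_state_add() plus the AGL_GMX_RX_ADR_CAM0..5 and CAM_EN writes) use a transposed layout: CAM register i holds byte i of every stored MAC address, each address sits in the byte lane selected by its entry index, and CAM_EN carries one valid bit per entry. The standalone sketch below reproduces that packing; the two example MAC addresses and the struct/field names outside the listing are illustrative.

#include <stdint.h>
#include <stdio.h>

struct cam_state {
	uint64_t cam[6];     /* cam[i] = byte i of up to 8 MACs, one byte lane per entry */
	uint64_t cam_mask;   /* one valid bit per entry, written to AGL_GMX_RX_ADR_CAM_EN */
	int cam_index;
};

static void cam_state_add(struct cam_state *cs, const uint8_t *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (uint64_t)addr[i] << (8 * cs->cam_index);
	cs->cam_mask |= 1ULL << cs->cam_index;
	cs->cam_index++;
}

int main(void)
{
	struct cam_state cs = { { 0 }, 0, 0 };
	const uint8_t mac0[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const uint8_t mac1[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
	int i;

	cam_state_add(&cs, mac0);            /* entry 0: byte lane 0 of each cam[i] */
	cam_state_add(&cs, mac1);            /* entry 1: byte lane 1 of each cam[i] */

	for (i = 0; i < 6; i++)              /* these six values go to CAM0..CAM5 */
		printf("CAM%d = 0x%016llx\n", i, (unsigned long long)cs.cam[i]);
	printf("CAM_EN = 0x%llx\n", (unsigned long long)cs.cam_mask);
	return 0;
}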
652 WRITE_ONCE(netdev->mtu, new_mtu); in octeon_mgmt_change_mtu()
657 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet); in octeon_mgmt_change_mtu()
663 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, in octeon_mgmt_change_mtu()
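octeon_mgmt_change_mtu() derives the hardware limits from the new MTU: AGL_GMX_RX_FRM_MAX gets the maximum accepted frame size and AGL_GMX_RX_JABBER gets that size rounded up, since the jabber check works in 8-byte units. The arithmetic below is a sketch of that calculation; the header/FCS overhead and the round-to-8 step match my reading of recent kernels but are not visible in the matched lines, so treat them as illustrative.

#include <stdio.h>

#define ETH_HLEN     14                      /* Ethernet header */
#define ETH_FCS_LEN  4                       /* frame check sequence */

int main(void)
{
	int new_mtu = 1500;
	int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* AGL_GMX_RX_FRM_MAX: largest frame the port will accept */
	printf("FRM_MAX = %d\n", max_packet);

	/* AGL_GMX_RX_JABBER counts in 8-byte units, so round up */
	printf("JABBER  = %d\n", (max_packet + 7) & ~7);
	return 0;
}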
675 mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR); in octeon_mgmt_interrupt()
678 cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64); in octeon_mgmt_interrupt()
679 cvmx_read_csr(p->mix + MIX_ISR); in octeon_mgmt_interrupt()
683 napi_schedule(&p->napi); in octeon_mgmt_interrupt()
687 tasklet_schedule(&p->tx_clean_tasklet); in octeon_mgmt_interrupt()
697 struct hwtstamp_config config; in octeon_mgmt_ioctl_hwtstamp() local
702 if (copy_from_user(&config, rq->ifr_data, sizeof(config))) in octeon_mgmt_ioctl_hwtstamp()
703 return -EFAULT; in octeon_mgmt_ioctl_hwtstamp()
737 return -EINVAL; in octeon_mgmt_ioctl_hwtstamp()
739 switch (config.tx_type) { in octeon_mgmt_ioctl_hwtstamp()
744 return -ERANGE; in octeon_mgmt_ioctl_hwtstamp()
747 switch (config.rx_filter) { in octeon_mgmt_ioctl_hwtstamp()
749 p->has_rx_tstamp = false; in octeon_mgmt_ioctl_hwtstamp()
750 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); in octeon_mgmt_ioctl_hwtstamp()
752 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); in octeon_mgmt_ioctl_hwtstamp()
769 p->has_rx_tstamp = have_hw_timestamps; in octeon_mgmt_ioctl_hwtstamp()
770 config.rx_filter = HWTSTAMP_FILTER_ALL; in octeon_mgmt_ioctl_hwtstamp()
771 if (p->has_rx_tstamp) { in octeon_mgmt_ioctl_hwtstamp()
772 rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); in octeon_mgmt_ioctl_hwtstamp()
774 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); in octeon_mgmt_ioctl_hwtstamp()
778 return -ERANGE; in octeon_mgmt_ioctl_hwtstamp()
781 if (copy_to_user(rq->ifr_data, &config, sizeof(config))) in octeon_mgmt_ioctl_hwtstamp()
782 return -EFAULT; in octeon_mgmt_ioctl_hwtstamp()
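The SIOCSHWTSTAMP handler above follows the usual pattern: copy a struct hwtstamp_config from user space (-EFAULT on failure), reject unsupported tx_type or rx_filter values with -ERANGE, program the ptp_mode bit in AGL_GMX_RX_FRM_CTL, and copy the possibly adjusted config back. The userspace sketch below compresses the validate-and-echo part; the enum values, struct and function names are stand-ins, not the uapi definitions.

#include <stdio.h>
#include <errno.h>

/* stand-ins for the uapi hwtstamp_config enums (values are illustrative) */
enum { TX_OFF, TX_ON };
enum { FILTER_NONE, FILTER_ALL, FILTER_SOME_PTP };

struct cfg { int flags; int tx_type; int rx_filter; };

static int handle_hwtstamp(struct cfg *c)
{
	switch (c->tx_type) {
	case TX_OFF:
	case TX_ON:
		break;
	default:
		return -ERANGE;              /* unsupported TX timestamp mode */
	}

	switch (c->rx_filter) {
	case FILTER_NONE:
		/* driver: has_rx_tstamp = false, clear ptp_mode in AGL_GMX_RX_FRM_CTL */
		break;
	case FILTER_ALL:
	case FILTER_SOME_PTP:
		/* driver: enable ptp_mode and report FILTER_ALL back, because the
		 * hardware stamps every received frame, not just PTP ones */
		c->rx_filter = FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}
	return 0;                            /* caller copies the adjusted cfg back to user space */
}

int main(void)
{
	struct cfg c = { 0, TX_ON, FILTER_SOME_PTP };

	printf("rc=%d rx_filter=%d\n", handle_hwtstamp(&c), c.rx_filter);
	return 0;
}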
803 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_disable_link()
807 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); in octeon_mgmt_disable_link()
812 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_disable_link()
826 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_enable_link()
830 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); in octeon_mgmt_enable_link()
835 struct net_device *ndev = p->netdev; in octeon_mgmt_update_link()
836 struct phy_device *phydev = ndev->phydev; in octeon_mgmt_update_link()
839 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_update_link()
841 if (!phydev->link) in octeon_mgmt_update_link()
844 prtx_cfg.s.duplex = phydev->duplex; in octeon_mgmt_update_link()
846 switch (phydev->speed) { in octeon_mgmt_update_link()
870 /* Only matters for half-duplex */ in octeon_mgmt_update_link()
872 prtx_cfg.s.burst = phydev->duplex; in octeon_mgmt_update_link()
881 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); in octeon_mgmt_update_link()
883 /* Read GMX CFG again to make sure the config is completed. */ in octeon_mgmt_update_link()
884 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); in octeon_mgmt_update_link()
890 prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_update_link()
891 agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK); in octeon_mgmt_update_link()
895 if (phydev->speed == 10) in octeon_mgmt_update_link()
897 else if (phydev->speed == 100) in octeon_mgmt_update_link()
900 cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); in octeon_mgmt_update_link()
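octeon_mgmt_update_link() reprograms the port for the negotiated link: duplex and speed go into AGL_GMX_PRT_CFG (which is read back to make sure the write completed), and AGL_GMX_TX_CLK gets a clock divider that depends on the speed, with 10 and 100 Mbit/s needing larger dividers than gigabit. The sketch below only shows the speed-to-divider selection; the divider constants are placeholders, not values taken from the hardware manual.

#include <stdio.h>

/* Pick a TX clock divider for AGL_GMX_TX_CLK based on link speed.
 * The real values are chip specific; these are placeholders. */
static int tx_clk_divider(int speed_mbps)
{
	switch (speed_mbps) {
	case 10:   return 50;   /* slowest link, largest divider */
	case 100:  return 5;
	case 1000: return 1;
	default:   return 1;    /* leave the reset default alone */
	}
}

int main(void)
{
	int speeds[3] = { 10, 100, 1000 };
	int i;

	for (i = 0; i < 3; i++)
		printf("%4d Mbit/s -> clk_cnt %d\n", speeds[i], tx_clk_divider(speeds[i]));
	return 0;
}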
907 struct phy_device *phydev = netdev->phydev; in octeon_mgmt_adjust_link()
914 spin_lock_irqsave(&p->lock, flags); in octeon_mgmt_adjust_link()
917 if (!phydev->link && p->last_link) in octeon_mgmt_adjust_link()
918 link_changed = -1; in octeon_mgmt_adjust_link()
920 if (phydev->link && in octeon_mgmt_adjust_link()
921 (p->last_duplex != phydev->duplex || in octeon_mgmt_adjust_link()
922 p->last_link != phydev->link || in octeon_mgmt_adjust_link()
923 p->last_speed != phydev->speed)) { in octeon_mgmt_adjust_link()
930 p->last_link = phydev->link; in octeon_mgmt_adjust_link()
931 p->last_speed = phydev->speed; in octeon_mgmt_adjust_link()
932 p->last_duplex = phydev->duplex; in octeon_mgmt_adjust_link()
934 spin_unlock_irqrestore(&p->lock, flags); in octeon_mgmt_adjust_link()
938 netdev_info(netdev, "Link is up - %d/%s\n", in octeon_mgmt_adjust_link()
939 phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half"); in octeon_mgmt_adjust_link()
950 if (octeon_is_simulation() || p->phy_np == NULL) { in octeon_mgmt_init_phy()
956 phydev = of_phy_connect(netdev, p->phy_np, in octeon_mgmt_init_phy()
961 return -EPROBE_DEFER; in octeon_mgmt_init_phy()
980 p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), in octeon_mgmt_open()
982 if (!p->tx_ring) in octeon_mgmt_open()
983 return -ENOMEM; in octeon_mgmt_open()
984 p->tx_ring_handle = in octeon_mgmt_open()
985 dma_map_single(p->dev, p->tx_ring, in octeon_mgmt_open()
988 p->tx_next = 0; in octeon_mgmt_open()
989 p->tx_next_clean = 0; in octeon_mgmt_open()
990 p->tx_current_fill = 0; in octeon_mgmt_open()
993 p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), in octeon_mgmt_open()
995 if (!p->rx_ring) in octeon_mgmt_open()
997 p->rx_ring_handle = in octeon_mgmt_open()
998 dma_map_single(p->dev, p->rx_ring, in octeon_mgmt_open()
1002 p->rx_next = 0; in octeon_mgmt_open()
1003 p->rx_next_fill = 0; in octeon_mgmt_open()
1004 p->rx_current_fill = 0; in octeon_mgmt_open()
1008 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_open()
1013 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_open()
1015 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); in octeon_mgmt_open()
1032 if (p->port) { in octeon_mgmt_open()
1045 oring1.s.obase = p->tx_ring_handle >> 3; in octeon_mgmt_open()
1047 cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64); in octeon_mgmt_open()
1050 iring1.s.ibase = p->rx_ring_handle >> 3; in octeon_mgmt_open()
1052 cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); in octeon_mgmt_open()
1054 memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); in octeon_mgmt_open()
1057 octeon_mgmt_change_mtu(netdev, netdev->mtu); in octeon_mgmt_open()
1066 /* MII CB-request FIFO programmable high watermark */ in octeon_mgmt_open()
1071 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); in octeon_mgmt_open()
1075 dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port); in octeon_mgmt_open()
1080 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) { in octeon_mgmt_open()
1084 netdev->phydev->supported) | in octeon_mgmt_open()
1086 netdev->phydev->supported)) != 0; in octeon_mgmt_open()
1088 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1090 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1098 /* Take the DLL and clock tree out of reset */ in octeon_mgmt_open()
1099 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1105 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1106 cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */ in octeon_mgmt_open()
1108 /* Wait for the DLL to lock. External 125 MHz in octeon_mgmt_open()
1114 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1116 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1119 agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
1124 cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); in octeon_mgmt_open()
1126 cvmx_read_csr(p->agl_prt_ctl); in octeon_mgmt_open()
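These octeon_mgmt_open() lines are the ones that match the dll search term: on CN6XXX parts the AGL port-control register takes the DLL and clock tree out of reset, the write is flushed by reading the register back before waiting, the code then waits for the DLL to lock against the external 125 MHz reference (per the comments above), and only after that is the interface enabled. The stub-register sketch below shows that ordering only; the register model, the field names (clkrst, dllrst, enable) and the empty lock wait are assumptions for illustration, not the real CSR layout.

#include <stdio.h>

/* Fake model of the AGL port-control register: just the bits discussed above. */
struct prt_ctl {
	unsigned clkrst : 1;     /* clock tree reset */
	unsigned dllrst : 1;     /* DLL reset */
	unsigned enable : 1;     /* interface enable */
};

static struct prt_ctl reg = { 1, 1, 0 };     /* comes up with everything in reset */

static struct prt_ctl csr_read(void)    { return reg; }
static void csr_write(struct prt_ctl v) { reg = v; }
static void wait_for_dll_lock(void)     { /* driver: delay long enough for the DLL to lock */ }

int main(void)
{
	struct prt_ctl ctl;

	/* 1. take the DLL and clock tree out of reset */
	ctl = csr_read();
	ctl.clkrst = 0;
	ctl.dllrst = 0;
	csr_write(ctl);
	csr_read();              /* read back to force the write out before waiting */

	/* 2. wait for the DLL to lock (external 125 MHz reference must be stable) */
	wait_for_dll_lock();

	/* 3. only now enable the interface */
	ctl = csr_read();
	ctl.enable = 1;
	csr_write(ctl);

	printf("clkrst=%d dllrst=%d enable=%d\n", reg.clkrst, reg.dllrst, reg.enable);
	return 0;
}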
1144 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1); in octeon_mgmt_open()
1145 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0); in octeon_mgmt_open()
1146 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0); in octeon_mgmt_open()
1148 cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1); in octeon_mgmt_open()
1149 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0); in octeon_mgmt_open()
1150 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0); in octeon_mgmt_open()
1153 cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR)); in octeon_mgmt_open()
1155 if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, in octeon_mgmt_open()
1157 dev_err(p->dev, "request_irq(%d) failed.\n", p->irq); in octeon_mgmt_open()
1164 cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64); in octeon_mgmt_open()
1169 cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); in octeon_mgmt_open()
1175 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); in octeon_mgmt_open()
1180 rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0; in octeon_mgmt_open()
1182 /* When set, disables the length check for non-min sized pkts in octeon_mgmt_open()
1204 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); in octeon_mgmt_open()
1208 if (netdev->phydev) in octeon_mgmt_open()
1212 p->last_link = 0; in octeon_mgmt_open()
1213 p->last_speed = 0; in octeon_mgmt_open()
1217 if (netdev->phydev) { in octeon_mgmt_open()
1219 phy_start(netdev->phydev); in octeon_mgmt_open()
1223 napi_enable(&p->napi); in octeon_mgmt_open()
1228 dma_unmap_single(p->dev, p->rx_ring_handle, in octeon_mgmt_open()
1231 kfree(p->rx_ring); in octeon_mgmt_open()
1233 dma_unmap_single(p->dev, p->tx_ring_handle, in octeon_mgmt_open()
1236 kfree(p->tx_ring); in octeon_mgmt_open()
1237 return -ENOMEM; in octeon_mgmt_open()
1244 napi_disable(&p->napi); in octeon_mgmt_stop()
1247 if (netdev->phydev) { in octeon_mgmt_stop()
1248 phy_stop(netdev->phydev); in octeon_mgmt_stop()
1249 phy_disconnect(netdev->phydev); in octeon_mgmt_stop()
1256 free_irq(p->irq, netdev); in octeon_mgmt_stop()
1259 skb_queue_purge(&p->tx_list); in octeon_mgmt_stop()
1260 skb_queue_purge(&p->rx_list); in octeon_mgmt_stop()
1262 dma_unmap_single(p->dev, p->rx_ring_handle, in octeon_mgmt_stop()
1265 kfree(p->rx_ring); in octeon_mgmt_stop()
1267 dma_unmap_single(p->dev, p->tx_ring_handle, in octeon_mgmt_stop()
1270 kfree(p->tx_ring); in octeon_mgmt_stop()
1284 re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0); in octeon_mgmt_xmit()
1285 re.s.len = skb->len; in octeon_mgmt_xmit()
1286 re.s.addr = dma_map_single(p->dev, skb->data, in octeon_mgmt_xmit()
1287 skb->len, in octeon_mgmt_xmit()
1290 spin_lock_irqsave(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1292 if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) { in octeon_mgmt_xmit()
1293 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1295 spin_lock_irqsave(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1298 if (unlikely(p->tx_current_fill >= in octeon_mgmt_xmit()
1300 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1301 dma_unmap_single(p->dev, re.s.addr, re.s.len, in octeon_mgmt_xmit()
1306 __skb_queue_tail(&p->tx_list, skb); in octeon_mgmt_xmit()
1309 p->tx_ring[p->tx_next] = re.d64; in octeon_mgmt_xmit()
1310 p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; in octeon_mgmt_xmit()
1311 p->tx_current_fill++; in octeon_mgmt_xmit()
1313 spin_unlock_irqrestore(&p->tx_list.lock, flags); in octeon_mgmt_xmit()
1315 dma_sync_single_for_device(p->dev, p->tx_ring_handle, in octeon_mgmt_xmit()
1319 netdev->stats.tx_packets++; in octeon_mgmt_xmit()
1320 netdev->stats.tx_bytes += skb->len; in octeon_mgmt_xmit()
1323 cvmx_write_csr(p->mix + MIX_ORING2, 1); in octeon_mgmt_xmit()
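octeon_mgmt_xmit() shows the back-pressure path: when the TX ring is already at ring_max_fill() - 1 the driver drops the list lock, reclaims completed buffers, retakes the lock and rechecks; only if the ring is still full does it unmap the fresh buffer and drop the packet, otherwise the skb is queued, the descriptor written, and MIX_ORING2 is poked to start transmission. The compact sketch below keeps just that check-clean-recheck control flow; the ring size, the stub reclaim routine and the fake completion count are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define TX_RING_SIZE 16                      /* illustrative; the driver uses its own constant */

static int tx_current_fill;                  /* descriptors currently owned by the "hardware" */
static int hw_completed = 3;                 /* pretend the hardware has finished 3 packets */

static int ring_max_fill(int ring_size) { return ring_size - 8; }

static void clean_tx_buffers(void)           /* stub for octeon_mgmt_clean_tx_buffers() */
{
	while (hw_completed && tx_current_fill) {
		hw_completed--;
		tx_current_fill--;
	}
}

static bool try_submit(void)
{
	if (tx_current_fill >= ring_max_fill(TX_RING_SIZE) - 1) {
		clean_tx_buffers();           /* driver: drop the lock, reclaim, relock */
		if (tx_current_fill >= ring_max_fill(TX_RING_SIZE) - 1)
			return false;         /* still full: unmap the buffer and drop */
	}
	/* queue the skb on tx_list, write tx_ring[tx_next], advance tx_next */
	tx_current_fill++;
	/* then write 1 to MIX_ORING2 so the MIX block picks the descriptor up */
	return true;
}

int main(void)
{
	int sent = 0;

	tx_current_fill = ring_max_fill(TX_RING_SIZE) - 1;   /* start with a full ring */
	while (try_submit())
		sent++;

	printf("submitted %d packets before the ring filled up\n", sent);
	return 0;
}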
1345 strscpy(info->driver, DRV_NAME, sizeof(info->driver)); in octeon_mgmt_get_drvinfo()
1351 return -EPERM; in octeon_mgmt_nway_reset()
1353 if (dev->phydev) in octeon_mgmt_nway_reset()
1354 return phy_start_aneg(dev->phydev); in octeon_mgmt_nway_reset()
1356 return -EOPNOTSUPP; in octeon_mgmt_nway_reset()
1393 return -ENOMEM; in octeon_mgmt_probe()
1395 SET_NETDEV_DEV(netdev, &pdev->dev); in octeon_mgmt_probe()
1399 netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll, in octeon_mgmt_probe()
1402 p->netdev = netdev; in octeon_mgmt_probe()
1403 p->dev = &pdev->dev; in octeon_mgmt_probe()
1404 p->has_rx_tstamp = false; in octeon_mgmt_probe()
1406 data = of_get_property(pdev->dev.of_node, "cell-index", &len); in octeon_mgmt_probe()
1408 p->port = be32_to_cpup(data); in octeon_mgmt_probe()
1410 dev_err(&pdev->dev, "no 'cell-index' property\n"); in octeon_mgmt_probe()
1411 result = -ENXIO; in octeon_mgmt_probe()
1415 snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); in octeon_mgmt_probe()
1421 p->irq = result; in octeon_mgmt_probe()
1425 dev_err(&pdev->dev, "no 'reg' resource\n"); in octeon_mgmt_probe()
1426 result = -ENXIO; in octeon_mgmt_probe()
1432 dev_err(&pdev->dev, "no 'reg' resource\n"); in octeon_mgmt_probe()
1433 result = -ENXIO; in octeon_mgmt_probe()
1439 dev_err(&pdev->dev, "no 'reg' resource\n"); in octeon_mgmt_probe()
1440 result = -ENXIO; in octeon_mgmt_probe()
1444 p->mix_phys = res_mix->start; in octeon_mgmt_probe()
1445 p->mix_size = resource_size(res_mix); in octeon_mgmt_probe()
1446 p->agl_phys = res_agl->start; in octeon_mgmt_probe()
1447 p->agl_size = resource_size(res_agl); in octeon_mgmt_probe()
1448 p->agl_prt_ctl_phys = res_agl_prt_ctl->start; in octeon_mgmt_probe()
1449 p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl); in octeon_mgmt_probe()
1452 if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, in octeon_mgmt_probe()
1453 res_mix->name)) { in octeon_mgmt_probe()
1454 dev_err(&pdev->dev, "request_mem_region (%s) failed\n", in octeon_mgmt_probe()
1455 res_mix->name); in octeon_mgmt_probe()
1456 result = -ENXIO; in octeon_mgmt_probe()
1460 if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size, in octeon_mgmt_probe()
1461 res_agl->name)) { in octeon_mgmt_probe()
1462 result = -ENXIO; in octeon_mgmt_probe()
1463 dev_err(&pdev->dev, "request_mem_region (%s) failed\n", in octeon_mgmt_probe()
1464 res_agl->name); in octeon_mgmt_probe()
1468 if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys, in octeon_mgmt_probe()
1469 p->agl_prt_ctl_size, res_agl_prt_ctl->name)) { in octeon_mgmt_probe()
1470 result = -ENXIO; in octeon_mgmt_probe()
1471 dev_err(&pdev->dev, "request_mem_region (%s) failed\n", in octeon_mgmt_probe()
1472 res_agl_prt_ctl->name); in octeon_mgmt_probe()
1476 p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); in octeon_mgmt_probe()
1477 p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); in octeon_mgmt_probe()
1478 p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys, in octeon_mgmt_probe()
1479 p->agl_prt_ctl_size); in octeon_mgmt_probe()
1480 if (!p->mix || !p->agl || !p->agl_prt_ctl) { in octeon_mgmt_probe()
1481 dev_err(&pdev->dev, "failed to map I/O memory\n"); in octeon_mgmt_probe()
1482 result = -ENOMEM; in octeon_mgmt_probe()
1486 spin_lock_init(&p->lock); in octeon_mgmt_probe()
1488 skb_queue_head_init(&p->tx_list); in octeon_mgmt_probe()
1489 skb_queue_head_init(&p->rx_list); in octeon_mgmt_probe()
1490 tasklet_setup(&p->tx_clean_tasklet, in octeon_mgmt_probe()
1493 netdev->priv_flags |= IFF_UNICAST_FLT; in octeon_mgmt_probe()
1495 netdev->netdev_ops = &octeon_mgmt_ops; in octeon_mgmt_probe()
1496 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; in octeon_mgmt_probe()
1498 netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; in octeon_mgmt_probe()
1499 netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN; in octeon_mgmt_probe()
1501 result = of_get_ethdev_address(pdev->dev.of_node, netdev); in octeon_mgmt_probe()
1505 p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); in octeon_mgmt_probe()
1507 result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in octeon_mgmt_probe()
1519 of_node_put(p->phy_np); in octeon_mgmt_probe()
1530 of_node_put(p->phy_np); in octeon_mgmt_remove()
1536 .compatible = "cavium,octeon-5750-mix",
1553 MODULE_SOFTDEP("pre: mdio-cavium");