Lines matching full:lp -- full-text search hits for "lp" in the pcnet32 Ethernet driver (pcnet32.c); each hit shows the source line number, the matching code, and the enclosing function
448 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_netif_stop() local
451 napi_disable(&lp->napi); in pcnet32_netif_stop()
457 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_netif_start() local
462 val = lp->a->read_csr(ioaddr, CSR3); in pcnet32_netif_start()
464 lp->a->write_csr(ioaddr, CSR3, val); in pcnet32_netif_start()
465 napi_enable_locked(&lp->napi); in pcnet32_netif_start()
473 * Must be called with lp->lock held.
476 struct pcnet32_private *lp, in pcnet32_realloc_tx_ring() argument
488 dma_alloc_coherent(&lp->pci_dev->dev, in pcnet32_realloc_tx_ring()
502 kfree(lp->tx_skbuff); in pcnet32_realloc_tx_ring()
503 kfree(lp->tx_dma_addr); in pcnet32_realloc_tx_ring()
504 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_realloc_tx_ring()
505 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, in pcnet32_realloc_tx_ring()
506 lp->tx_ring, lp->tx_ring_dma_addr); in pcnet32_realloc_tx_ring()
508 lp->tx_ring_size = entries; in pcnet32_realloc_tx_ring()
509 lp->tx_mod_mask = lp->tx_ring_size - 1; in pcnet32_realloc_tx_ring()
510 lp->tx_len_bits = (size << 12); in pcnet32_realloc_tx_ring()
511 lp->tx_ring = new_tx_ring; in pcnet32_realloc_tx_ring()
512 lp->tx_ring_dma_addr = new_ring_dma_addr; in pcnet32_realloc_tx_ring()
513 lp->tx_dma_addr = new_dma_addr_list; in pcnet32_realloc_tx_ring()
514 lp->tx_skbuff = new_skb_list; in pcnet32_realloc_tx_ring()
520 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_realloc_tx_ring()
533 * Must be called with lp->lock held.
536 struct pcnet32_private *lp, in pcnet32_realloc_rx_ring() argument
547 dma_alloc_coherent(&lp->pci_dev->dev, in pcnet32_realloc_rx_ring()
562 overlap = min(entries, lp->rx_ring_size); in pcnet32_realloc_rx_ring()
564 new_rx_ring[new] = lp->rx_ring[new]; in pcnet32_realloc_rx_ring()
565 new_dma_addr_list[new] = lp->rx_dma_addr[new]; in pcnet32_realloc_rx_ring()
566 new_skb_list[new] = lp->rx_skbuff[new]; in pcnet32_realloc_rx_ring()
575 netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n", in pcnet32_realloc_rx_ring()
582 dma_map_single(&lp->pci_dev->dev, rx_skbuff->data, in pcnet32_realloc_rx_ring()
584 if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) { in pcnet32_realloc_rx_ring()
585 netif_err(lp, drv, dev, "%s dma mapping failed\n", in pcnet32_realloc_rx_ring()
595 for (; new < lp->rx_ring_size; new++) { in pcnet32_realloc_rx_ring()
596 if (lp->rx_skbuff[new]) { in pcnet32_realloc_rx_ring()
597 if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[new])) in pcnet32_realloc_rx_ring()
598 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_realloc_rx_ring()
599 lp->rx_dma_addr[new], in pcnet32_realloc_rx_ring()
602 dev_kfree_skb(lp->rx_skbuff[new]); in pcnet32_realloc_rx_ring()
606 kfree(lp->rx_skbuff); in pcnet32_realloc_rx_ring()
607 kfree(lp->rx_dma_addr); in pcnet32_realloc_rx_ring()
608 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_realloc_rx_ring()
609 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, in pcnet32_realloc_rx_ring()
610 lp->rx_ring, lp->rx_ring_dma_addr); in pcnet32_realloc_rx_ring()
612 lp->rx_ring_size = entries; in pcnet32_realloc_rx_ring()
613 lp->rx_mod_mask = lp->rx_ring_size - 1; in pcnet32_realloc_rx_ring()
614 lp->rx_len_bits = (size << 4); in pcnet32_realloc_rx_ring()
615 lp->rx_ring = new_rx_ring; in pcnet32_realloc_rx_ring()
616 lp->rx_ring_dma_addr = new_ring_dma_addr; in pcnet32_realloc_rx_ring()
617 lp->rx_dma_addr = new_dma_addr_list; in pcnet32_realloc_rx_ring()
618 lp->rx_skbuff = new_skb_list; in pcnet32_realloc_rx_ring()
622 while (--new >= lp->rx_ring_size) { in pcnet32_realloc_rx_ring()
624 if (!dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) in pcnet32_realloc_rx_ring()
625 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_realloc_rx_ring()
636 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_realloc_rx_ring()
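The two realloc helpers above follow the usual pattern for resizing a DMA-coherent descriptor ring: allocate the new ring and bookkeeping arrays first, copy or remap the overlapping entries, free the old ring, then swap the pointers, all under the driver lock. Below is a minimal sketch of that pattern using hypothetical my_desc/my_priv types rather than the pcnet32 structures; in the real driver the skb and dma_addr_t bookkeeping arrays are resized the same way with kmalloc_array()/kfree().

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>

/* Illustrative descriptor and private layout, not the pcnet32 structures. */
#define MY_DESC_OWN 0x8000		/* "descriptor owned by the NIC" status bit */

struct my_desc {
	__le32 base;			/* DMA address of the data buffer */
	__le16 len;
	__le16 status;			/* MY_DESC_OWN when the NIC owns it */
};

struct my_priv {
	struct device	*dev;		/* &pci_dev->dev for a PCI NIC */
	spinlock_t	lock;
	struct napi_struct napi;

	struct my_desc	*ring;		/* DMA-coherent descriptor ring */
	dma_addr_t	ring_dma;
	struct sk_buff	**skb;		/* one skb per descriptor */
	dma_addr_t	*dma;		/* streaming mapping per descriptor */
	unsigned int	ring_size;	/* power of two */
	unsigned int	cur, dirty;	/* producer / consumer indices */
};

/*
 * Resize the descriptor ring: allocate the new ring first, copy the
 * overlapping entries, free the old ring, then swap the pointers.
 * Called with p->lock held, hence GFP_ATOMIC.
 */
static int my_realloc_ring(struct my_priv *p, unsigned int entries)
{
	struct my_desc *new_ring;
	dma_addr_t new_dma;

	new_ring = dma_alloc_coherent(p->dev, sizeof(*new_ring) * entries,
				      &new_dma, GFP_ATOMIC);
	if (!new_ring)
		return -ENOMEM;

	memcpy(new_ring, p->ring,
	       sizeof(*new_ring) * min(entries, p->ring_size));

	dma_free_coherent(p->dev, sizeof(*p->ring) * p->ring_size,
			  p->ring, p->ring_dma);
	p->ring = new_ring;
	p->ring_dma = new_dma;
	p->ring_size = entries;
	return 0;
}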
643 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_purge_rx_ring() local
647 for (i = 0; i < lp->rx_ring_size; i++) { in pcnet32_purge_rx_ring()
648 lp->rx_ring[i].status = 0; /* CPU owns buffer */ in pcnet32_purge_rx_ring()
650 if (lp->rx_skbuff[i]) { in pcnet32_purge_rx_ring()
651 if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) in pcnet32_purge_rx_ring()
652 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_purge_rx_ring()
653 lp->rx_dma_addr[i], in pcnet32_purge_rx_ring()
656 dev_kfree_skb_any(lp->rx_skbuff[i]); in pcnet32_purge_rx_ring()
658 lp->rx_skbuff[i] = NULL; in pcnet32_purge_rx_ring()
659 lp->rx_dma_addr[i] = 0; in pcnet32_purge_rx_ring()
673 * lp->lock must be held.
679 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_suspend() local
680 const struct pcnet32_access *a = lp->a; in pcnet32_suspend()
685 if (lp->chip_version < PCNET32_79C970A) in pcnet32_suspend()
695 spin_unlock_irqrestore(&lp->lock, *flags); in pcnet32_suspend()
700 spin_lock_irqsave(&lp->lock, *flags); in pcnet32_suspend()
703 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_suspend()
711 static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr) in pcnet32_clr_suspend() argument
713 int csr5 = lp->a->read_csr(ioaddr, CSR5); in pcnet32_clr_suspend()
715 lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND); in pcnet32_clr_suspend()
721 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_link_ksettings() local
724 spin_lock_irqsave(&lp->lock, flags); in pcnet32_get_link_ksettings()
725 if (lp->mii) { in pcnet32_get_link_ksettings()
726 mii_ethtool_get_link_ksettings(&lp->mii_if, cmd); in pcnet32_get_link_ksettings()
727 } else if (lp->chip_version == PCNET32_79C970A) { in pcnet32_get_link_ksettings()
728 if (lp->autoneg) { in pcnet32_get_link_ksettings()
730 if (lp->a->read_bcr(dev->base_addr, 4) == 0xc0) in pcnet32_get_link_ksettings()
736 cmd->base.port = lp->port_tp ? PORT_TP : PORT_AUI; in pcnet32_get_link_ksettings()
738 cmd->base.duplex = lp->fdx ? DUPLEX_FULL : DUPLEX_HALF; in pcnet32_get_link_ksettings()
744 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_get_link_ksettings()
751 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_set_link_ksettings() local
757 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_link_ksettings()
758 if (lp->mii) { in pcnet32_set_link_ksettings()
759 r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd); in pcnet32_set_link_ksettings()
760 } else if (lp->chip_version == PCNET32_79C970A) { in pcnet32_set_link_ksettings()
763 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); in pcnet32_set_link_ksettings()
765 lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE; in pcnet32_set_link_ksettings()
766 bcr2 = lp->a->read_bcr(ioaddr, 2); in pcnet32_set_link_ksettings()
768 lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002); in pcnet32_set_link_ksettings()
770 lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002); in pcnet32_set_link_ksettings()
772 lp->port_tp = cmd->base.port == PORT_TP; in pcnet32_set_link_ksettings()
773 csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180; in pcnet32_set_link_ksettings()
776 lp->a->write_csr(ioaddr, CSR15, csr15); in pcnet32_set_link_ksettings()
777 lp->init_block->mode = cpu_to_le16(csr15); in pcnet32_set_link_ksettings()
779 lp->fdx = cmd->base.duplex == DUPLEX_FULL; in pcnet32_set_link_ksettings()
780 bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003; in pcnet32_set_link_ksettings()
783 lp->a->write_bcr(ioaddr, 9, bcr9); in pcnet32_set_link_ksettings()
786 pcnet32_clr_suspend(lp, ioaddr); in pcnet32_set_link_ksettings()
791 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_link_ksettings()
798 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_drvinfo() local
801 if (lp->pci_dev) in pcnet32_get_drvinfo()
802 strscpy(info->bus_info, pci_name(lp->pci_dev), in pcnet32_get_drvinfo()
811 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_link() local
815 spin_lock_irqsave(&lp->lock, flags); in pcnet32_get_link()
816 if (lp->mii) { in pcnet32_get_link()
817 r = mii_link_ok(&lp->mii_if); in pcnet32_get_link()
818 } else if (lp->chip_version == PCNET32_79C970A) { in pcnet32_get_link()
821 if (!lp->autoneg && lp->port_tp) in pcnet32_get_link()
822 r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); in pcnet32_get_link()
825 } else if (lp->chip_version > PCNET32_79C970A) { in pcnet32_get_link()
827 r = (lp->a->read_bcr(ioaddr, 4) != 0xc0); in pcnet32_get_link()
831 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_get_link()
838 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_msglevel() local
839 return lp->msg_enable; in pcnet32_get_msglevel()
844 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_set_msglevel() local
845 lp->msg_enable = value; in pcnet32_set_msglevel()
850 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_nway_reset() local
854 if (lp->mii) { in pcnet32_nway_reset()
855 spin_lock_irqsave(&lp->lock, flags); in pcnet32_nway_reset()
856 r = mii_nway_restart(&lp->mii_if); in pcnet32_nway_reset()
857 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_nway_reset()
867 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_ringparam() local
870 ering->tx_pending = lp->tx_ring_size; in pcnet32_get_ringparam()
872 ering->rx_pending = lp->rx_ring_size; in pcnet32_get_ringparam()
880 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_set_ringparam() local
893 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_ringparam()
894 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ in pcnet32_set_ringparam()
905 if ((1 << i) != lp->tx_ring_size) in pcnet32_set_ringparam()
906 pcnet32_realloc_tx_ring(dev, lp, i); in pcnet32_set_ringparam()
913 if ((1 << i) != lp->rx_ring_size) in pcnet32_set_ringparam()
914 pcnet32_realloc_rx_ring(dev, lp, i); in pcnet32_set_ringparam()
916 lp->napi.weight = lp->rx_ring_size / 2; in pcnet32_set_ringparam()
923 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_ringparam()
926 netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n", in pcnet32_set_ringparam()
927 lp->rx_ring_size, lp->tx_ring_size); in pcnet32_set_ringparam()
951 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_ethtool_test() local
957 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_ethtool_test()
961 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_ethtool_test()
964 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_ethtool_test()
970 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_loopback_test() local
971 const struct pcnet32_access *a = lp->a; /* access to registers */ in pcnet32_loopback_test()
991 spin_lock_irqsave(&lp->lock, flags); in pcnet32_loopback_test()
992 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */ in pcnet32_loopback_test()
994 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size)); in pcnet32_loopback_test()
997 lp->a->reset(ioaddr); in pcnet32_loopback_test()
998 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ in pcnet32_loopback_test()
1001 lp->a->write_bcr(ioaddr, 20, 2); in pcnet32_loopback_test()
1006 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ in pcnet32_loopback_test()
1013 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_loopback_test()
1020 lp->tx_skbuff[x] = skb; in pcnet32_loopback_test()
1021 lp->tx_ring[x].length = cpu_to_le16(-skb->len); in pcnet32_loopback_test()
1022 lp->tx_ring[x].misc = 0; in pcnet32_loopback_test()
1038 lp->tx_dma_addr[x] = in pcnet32_loopback_test()
1039 dma_map_single(&lp->pci_dev->dev, skb->data, skb->len, in pcnet32_loopback_test()
1041 if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) { in pcnet32_loopback_test()
1042 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_loopback_test()
1047 lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]); in pcnet32_loopback_test()
1049 lp->tx_ring[x].status = cpu_to_le16(status); in pcnet32_loopback_test()
1057 lp->a->write_csr(ioaddr, CSR15, x | 0x0044); in pcnet32_loopback_test()
1060 lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */ in pcnet32_loopback_test()
1066 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { in pcnet32_loopback_test()
1067 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_loopback_test()
1069 spin_lock_irqsave(&lp->lock, flags); in pcnet32_loopback_test()
1074 netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x); in pcnet32_loopback_test()
1079 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */ in pcnet32_loopback_test()
1081 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { in pcnet32_loopback_test()
1086 skb = lp->rx_skbuff[x]; in pcnet32_loopback_test()
1096 skb = lp->rx_skbuff[x]; in pcnet32_loopback_test()
1097 packet = lp->tx_skbuff[x]->data; in pcnet32_loopback_test()
1100 netif_printk(lp, hw, KERN_DEBUG, dev, in pcnet32_loopback_test()
1125 lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ in pcnet32_loopback_test()
1127 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_loopback_test()
1136 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_set_phys_id() local
1137 const struct pcnet32_access *a = lp->a; in pcnet32_set_phys_id()
1145 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_phys_id()
1147 lp->save_regs[i - 4] = a->read_bcr(ioaddr, i); in pcnet32_set_phys_id()
1148 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_phys_id()
1154 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_phys_id()
1157 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_phys_id()
1162 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_phys_id()
1164 a->write_bcr(ioaddr, i, lp->save_regs[i - 4]); in pcnet32_set_phys_id()
1165 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_phys_id()
1175 struct pcnet32_private *lp, in pcnet32_rx_entry() argument
1208 netif_err(lp, drv, dev, "Impossible packet size %d!\n", in pcnet32_rx_entry()
1214 netif_err(lp, rx_err, dev, "Runt packet!\n"); in pcnet32_rx_entry()
1230 new_dma_addr = dma_map_single(&lp->pci_dev->dev, in pcnet32_rx_entry()
1234 if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr)) { in pcnet32_rx_entry()
1235 netif_err(lp, rx_err, dev, in pcnet32_rx_entry()
1240 skb = lp->rx_skbuff[entry]; in pcnet32_rx_entry()
1241 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_rx_entry()
1242 lp->rx_dma_addr[entry], in pcnet32_rx_entry()
1246 lp->rx_skbuff[entry] = newskb; in pcnet32_rx_entry()
1247 lp->rx_dma_addr[entry] = new_dma_addr; in pcnet32_rx_entry()
1263 dma_sync_single_for_cpu(&lp->pci_dev->dev, in pcnet32_rx_entry()
1264 lp->rx_dma_addr[entry], pkt_len, in pcnet32_rx_entry()
1267 (unsigned char *)(lp->rx_skbuff[entry]->data), in pcnet32_rx_entry()
1269 dma_sync_single_for_device(&lp->pci_dev->dev, in pcnet32_rx_entry()
1270 lp->rx_dma_addr[entry], pkt_len, in pcnet32_rx_entry()
1281 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_rx() local
1282 int entry = lp->cur_rx & lp->rx_mod_mask; in pcnet32_rx()
1283 struct pcnet32_rx_head *rxp = &lp->rx_ring[entry]; in pcnet32_rx()
1288 pcnet32_rx_entry(dev, lp, rxp, entry); in pcnet32_rx()
1297 entry = (++lp->cur_rx) & lp->rx_mod_mask; in pcnet32_rx()
1298 rxp = &lp->rx_ring[entry]; in pcnet32_rx()
1306 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_tx() local
1307 unsigned int dirty_tx = lp->dirty_tx; in pcnet32_tx()
1311 while (dirty_tx != lp->cur_tx) { in pcnet32_tx()
1312 int entry = dirty_tx & lp->tx_mod_mask; in pcnet32_tx()
1313 int status = (short)le16_to_cpu(lp->tx_ring[entry].status); in pcnet32_tx()
1318 lp->tx_ring[entry].base = 0; in pcnet32_tx()
1322 int err_status = le32_to_cpu(lp->tx_ring[entry].misc); in pcnet32_tx()
1324 netif_err(lp, tx_err, dev, in pcnet32_tx()
1338 netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); in pcnet32_tx()
1344 if (!lp->dxsuflo) { /* If controller doesn't recover ... */ in pcnet32_tx()
1347 netif_err(lp, tx_err, dev, "Tx FIFO error!\n"); in pcnet32_tx()
1359 if (lp->tx_skbuff[entry]) { in pcnet32_tx()
1360 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_tx()
1361 lp->tx_dma_addr[entry], in pcnet32_tx()
1362 lp->tx_skbuff[entry]->len, in pcnet32_tx()
1364 dev_kfree_skb_any(lp->tx_skbuff[entry]); in pcnet32_tx()
1365 lp->tx_skbuff[entry] = NULL; in pcnet32_tx()
1366 lp->tx_dma_addr[entry] = 0; in pcnet32_tx()
1371 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); in pcnet32_tx()
1372 if (delta > lp->tx_ring_size) { in pcnet32_tx()
1373 netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n", in pcnet32_tx()
1374 dirty_tx, lp->cur_tx, lp->tx_full); in pcnet32_tx()
1375 dirty_tx += lp->tx_ring_size; in pcnet32_tx()
1376 delta -= lp->tx_ring_size; in pcnet32_tx()
1379 if (lp->tx_full && in pcnet32_tx()
1381 delta < lp->tx_ring_size - 2) { in pcnet32_tx()
1383 lp->tx_full = 0; in pcnet32_tx()
1386 lp->dirty_tx = dirty_tx; in pcnet32_tx()
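pcnet32_tx() above is the transmit-completion walk: it advances dirty_tx toward cur_tx, stops at descriptors the chip still owns, and unmaps and frees each completed skb. A hedged sketch of the same reclaim loop, reusing struct my_priv and MY_DESC_OWN from the ring sketch above (these are placeholders, not pcnet32 identifiers):

/* Reclaim completed TX descriptors; caller holds p->lock. */
static void my_tx_reclaim(struct my_priv *p)
{
	while (p->dirty != p->cur) {
		unsigned int entry = p->dirty & (p->ring_size - 1);
		u16 status = le16_to_cpu(p->ring[entry].status);

		if (status & MY_DESC_OWN)	/* NIC has not finished this one */
			break;

		if (p->skb[entry]) {
			dma_unmap_single(p->dev, p->dma[entry],
					 p->skb[entry]->len, DMA_TO_DEVICE);
			dev_kfree_skb_any(p->skb[entry]);
			p->skb[entry] = NULL;
		}
		p->dirty++;
	}
}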
1393 struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi); in pcnet32_poll() local
1394 struct net_device *dev = lp->dev; in pcnet32_poll()
1402 spin_lock_irqsave(&lp->lock, flags); in pcnet32_poll()
1405 lp->a->reset(ioaddr); in pcnet32_poll()
1406 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ in pcnet32_poll()
1413 val = lp->a->read_csr(ioaddr, CSR3); in pcnet32_poll()
1415 lp->a->write_csr(ioaddr, CSR3, val); in pcnet32_poll()
1418 lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN); in pcnet32_poll()
1421 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_poll()
1429 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_regs_len() local
1430 int j = lp->phycount * PCNET32_REGS_PER_PHY; in pcnet32_get_regs_len()
1440 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_regs() local
1441 const struct pcnet32_access *a = lp->a; in pcnet32_get_regs()
1445 spin_lock_irqsave(&lp->lock, flags); in pcnet32_get_regs()
1472 if (lp->mii) { in pcnet32_get_regs()
1475 if (lp->phymask & (1 << j)) { in pcnet32_get_regs()
1477 lp->a->write_bcr(ioaddr, 33, in pcnet32_get_regs()
1479 *buff++ = lp->a->read_bcr(ioaddr, 34); in pcnet32_get_regs()
1486 pcnet32_clr_suspend(lp, ioaddr); in pcnet32_get_regs()
1488 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_get_regs()
1598 struct pcnet32_private *lp; in pcnet32_probe1() local
1749 dev = alloc_etherdev(sizeof(*lp)); in pcnet32_probe1()
1841 lp = netdev_priv(dev); in pcnet32_probe1()
1843 lp->init_block = dma_alloc_coherent(&pdev->dev, in pcnet32_probe1()
1844 sizeof(*lp->init_block), in pcnet32_probe1()
1845 &lp->init_dma_addr, GFP_KERNEL); in pcnet32_probe1()
1846 if (!lp->init_block) { in pcnet32_probe1()
1852 lp->pci_dev = pdev; in pcnet32_probe1()
1854 lp->dev = dev; in pcnet32_probe1()
1856 spin_lock_init(&lp->lock); in pcnet32_probe1()
1858 lp->name = chipname; in pcnet32_probe1()
1859 lp->shared_irq = shared; in pcnet32_probe1()
1860 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ in pcnet32_probe1()
1861 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ in pcnet32_probe1()
1862 lp->tx_mod_mask = lp->tx_ring_size - 1; in pcnet32_probe1()
1863 lp->rx_mod_mask = lp->rx_ring_size - 1; in pcnet32_probe1()
1864 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); in pcnet32_probe1()
1865 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); in pcnet32_probe1()
1866 lp->mii_if.full_duplex = fdx; in pcnet32_probe1()
1867 lp->mii_if.phy_id_mask = 0x1f; in pcnet32_probe1()
1868 lp->mii_if.reg_num_mask = 0x1f; in pcnet32_probe1()
1869 lp->dxsuflo = dxsuflo; in pcnet32_probe1()
1870 lp->mii = mii; in pcnet32_probe1()
1871 lp->chip_version = chip_version; in pcnet32_probe1()
1872 lp->msg_enable = pcnet32_debug; in pcnet32_probe1()
1875 lp->options = PCNET32_PORT_ASEL; in pcnet32_probe1()
1877 lp->options = options_mapping[options[cards_found]]; in pcnet32_probe1()
1879 if (lp->chip_version == PCNET32_79C970A) in pcnet32_probe1()
1880 lp->options = PCNET32_PORT_10BT; in pcnet32_probe1()
1881 lp->mii_if.dev = dev; in pcnet32_probe1()
1882 lp->mii_if.mdio_read = mdio_read; in pcnet32_probe1()
1883 lp->mii_if.mdio_write = mdio_write; in pcnet32_probe1()
1886 lp->napi.weight = lp->rx_ring_size / 2; in pcnet32_probe1()
1888 netif_napi_add_weight(dev, &lp->napi, pcnet32_poll, in pcnet32_probe1()
1889 lp->rx_ring_size / 2); in pcnet32_probe1()
1891 if (fdx && !(lp->options & PCNET32_PORT_ASEL) && in pcnet32_probe1()
1893 lp->options |= PCNET32_PORT_FD; in pcnet32_probe1()
1895 lp->a = a; in pcnet32_probe1()
1898 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { in pcnet32_probe1()
1905 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; in pcnet32_probe1()
1907 lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */ in pcnet32_probe1()
1908 lp->init_block->tlen_rlen = in pcnet32_probe1()
1909 cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); in pcnet32_probe1()
1911 lp->init_block->phys_addr[i] = dev->dev_addr[i]; in pcnet32_probe1()
1912 lp->init_block->filter[0] = 0x00000000; in pcnet32_probe1()
1913 lp->init_block->filter[1] = 0x00000000; in pcnet32_probe1()
1914 lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); in pcnet32_probe1()
1915 lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); in pcnet32_probe1()
1920 a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); in pcnet32_probe1()
1921 a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); in pcnet32_probe1()
1951 if (lp->mii) { in pcnet32_probe1()
1952 /* lp->phycount and lp->phymask are set to 0 by memset above */ in pcnet32_probe1()
1954 lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f; in pcnet32_probe1()
1967 lp->phycount++; in pcnet32_probe1()
1968 lp->phymask |= (1 << i); in pcnet32_probe1()
1969 lp->mii_if.phy_id = i; in pcnet32_probe1()
1974 lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); in pcnet32_probe1()
1975 if (lp->phycount > 1) in pcnet32_probe1()
1976 lp->options |= PCNET32_PORT_MII; in pcnet32_probe1()
1979 timer_setup(&lp->watchdog_timer, pcnet32_watchdog, 0); in pcnet32_probe1()
1993 lp->next = pcnet32_dev; in pcnet32_probe1()
1998 pr_info("%s: registered as %s\n", dev->name, lp->name); in pcnet32_probe1()
2008 dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block), in pcnet32_probe1()
2009 lp->init_block, lp->init_dma_addr); in pcnet32_probe1()
2020 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_alloc_ring() local
2022 lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev, in pcnet32_alloc_ring()
2023 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, in pcnet32_alloc_ring()
2024 &lp->tx_ring_dma_addr, GFP_KERNEL); in pcnet32_alloc_ring()
2025 if (!lp->tx_ring) { in pcnet32_alloc_ring()
2026 netif_err(lp, drv, dev, "Coherent memory allocation failed\n"); in pcnet32_alloc_ring()
2030 lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev, in pcnet32_alloc_ring()
2031 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, in pcnet32_alloc_ring()
2032 &lp->rx_ring_dma_addr, GFP_KERNEL); in pcnet32_alloc_ring()
2033 if (!lp->rx_ring) { in pcnet32_alloc_ring()
2034 netif_err(lp, drv, dev, "Coherent memory allocation failed\n"); in pcnet32_alloc_ring()
2038 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t), in pcnet32_alloc_ring()
2040 if (!lp->tx_dma_addr) in pcnet32_alloc_ring()
2043 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t), in pcnet32_alloc_ring()
2045 if (!lp->rx_dma_addr) in pcnet32_alloc_ring()
2048 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *), in pcnet32_alloc_ring()
2050 if (!lp->tx_skbuff) in pcnet32_alloc_ring()
2053 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *), in pcnet32_alloc_ring()
2055 if (!lp->rx_skbuff) in pcnet32_alloc_ring()
2063 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_free_ring() local
2065 kfree(lp->tx_skbuff); in pcnet32_free_ring()
2066 lp->tx_skbuff = NULL; in pcnet32_free_ring()
2068 kfree(lp->rx_skbuff); in pcnet32_free_ring()
2069 lp->rx_skbuff = NULL; in pcnet32_free_ring()
2071 kfree(lp->tx_dma_addr); in pcnet32_free_ring()
2072 lp->tx_dma_addr = NULL; in pcnet32_free_ring()
2074 kfree(lp->rx_dma_addr); in pcnet32_free_ring()
2075 lp->rx_dma_addr = NULL; in pcnet32_free_ring()
2077 if (lp->tx_ring) { in pcnet32_free_ring()
2078 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_free_ring()
2079 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, in pcnet32_free_ring()
2080 lp->tx_ring, lp->tx_ring_dma_addr); in pcnet32_free_ring()
2081 lp->tx_ring = NULL; in pcnet32_free_ring()
2084 if (lp->rx_ring) { in pcnet32_free_ring()
2085 dma_free_coherent(&lp->pci_dev->dev, in pcnet32_free_ring()
2086 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, in pcnet32_free_ring()
2087 lp->rx_ring, lp->rx_ring_dma_addr); in pcnet32_free_ring()
2088 lp->rx_ring = NULL; in pcnet32_free_ring()
2094 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_open() local
2095 struct pci_dev *pdev = lp->pci_dev; in pcnet32_open()
2103 lp->shared_irq ? IRQF_SHARED : 0, dev->name, in pcnet32_open()
2109 spin_lock_irqsave(&lp->lock, flags); in pcnet32_open()
2117 lp->a->reset(ioaddr); in pcnet32_open()
2120 lp->a->write_bcr(ioaddr, 20, 2); in pcnet32_open()
2122 netif_printk(lp, ifup, KERN_DEBUG, dev, in pcnet32_open()
2124 __func__, dev->irq, (u32) (lp->tx_ring_dma_addr), in pcnet32_open()
2125 (u32) (lp->rx_ring_dma_addr), in pcnet32_open()
2126 (u32) (lp->init_dma_addr)); in pcnet32_open()
2128 lp->autoneg = !!(lp->options & PCNET32_PORT_ASEL); in pcnet32_open()
2129 lp->port_tp = !!(lp->options & PCNET32_PORT_10BT); in pcnet32_open()
2130 lp->fdx = !!(lp->options & PCNET32_PORT_FD); in pcnet32_open()
2133 val = lp->a->read_bcr(ioaddr, 2) & ~2; in pcnet32_open()
2134 if (lp->options & PCNET32_PORT_ASEL) in pcnet32_open()
2136 lp->a->write_bcr(ioaddr, 2, val); in pcnet32_open()
2139 if (lp->mii_if.full_duplex) { in pcnet32_open()
2140 val = lp->a->read_bcr(ioaddr, 9) & ~3; in pcnet32_open()
2141 if (lp->options & PCNET32_PORT_FD) { in pcnet32_open()
2143 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) in pcnet32_open()
2145 } else if (lp->options & PCNET32_PORT_ASEL) { in pcnet32_open()
2147 if (lp->chip_version == 0x2627) in pcnet32_open()
2150 lp->a->write_bcr(ioaddr, 9, val); in pcnet32_open()
2154 val = lp->a->read_csr(ioaddr, 124) & ~0x10; in pcnet32_open()
2155 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) in pcnet32_open()
2157 lp->a->write_csr(ioaddr, 124, val); in pcnet32_open()
2163 if (lp->options & PCNET32_PORT_ASEL) { in pcnet32_open()
2164 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; in pcnet32_open()
2165 netif_printk(lp, link, KERN_DEBUG, dev, in pcnet32_open()
2169 if (lp->phycount < 2) { in pcnet32_open()
2175 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { in pcnet32_open()
2176 lp->a->write_bcr(ioaddr, 32, in pcnet32_open()
2177 lp->a->read_bcr(ioaddr, 32) | 0x0080); in pcnet32_open()
2179 val = lp->a->read_bcr(ioaddr, 32) & ~0xb8; in pcnet32_open()
2180 if (lp->options & PCNET32_PORT_FD) in pcnet32_open()
2182 if (lp->options & PCNET32_PORT_100) in pcnet32_open()
2184 lp->a->write_bcr(ioaddr, 32, val); in pcnet32_open()
2186 if (lp->options & PCNET32_PORT_ASEL) { in pcnet32_open()
2187 lp->a->write_bcr(ioaddr, 32, in pcnet32_open()
2188 lp->a->read_bcr(ioaddr, in pcnet32_open()
2191 val = lp->a->read_bcr(ioaddr, 32) & ~0x98; in pcnet32_open()
2193 lp->a->write_bcr(ioaddr, 32, val); in pcnet32_open()
2206 val = lp->a->read_bcr(ioaddr, 2); in pcnet32_open()
2207 lp->a->write_bcr(ioaddr, 2, val & ~2); in pcnet32_open()
2208 val = lp->a->read_bcr(ioaddr, 32); in pcnet32_open()
2209 lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ in pcnet32_open()
2211 if (!(lp->options & PCNET32_PORT_ASEL)) { in pcnet32_open()
2217 (lp->options & PCNET32_PORT_100) ? in pcnet32_open()
2219 bcr9 = lp->a->read_bcr(ioaddr, 9); in pcnet32_open()
2221 if (lp->options & PCNET32_PORT_FD) { in pcnet32_open()
2228 lp->a->write_bcr(ioaddr, 9, bcr9); in pcnet32_open()
2232 if (lp->phymask & (1 << i)) { in pcnet32_open()
2244 lp->mii_if.phy_id = i; in pcnet32_open()
2246 if (lp->options & PCNET32_PORT_ASEL) { in pcnet32_open()
2247 mii_ethtool_gset(&lp->mii_if, &ecmd); in pcnet32_open()
2250 mii_ethtool_sset(&lp->mii_if, &ecmd); in pcnet32_open()
2253 lp->mii_if.phy_id = first_phy; in pcnet32_open()
2254 netif_info(lp, link, dev, "Using PHY number %d\n", first_phy); in pcnet32_open()
2258 if (lp->dxsuflo) { /* Disable transmit stop on underflow */ in pcnet32_open()
2259 val = lp->a->read_csr(ioaddr, CSR3); in pcnet32_open()
2261 lp->a->write_csr(ioaddr, CSR3, val); in pcnet32_open()
2265 lp->init_block->mode = in pcnet32_open()
2266 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); in pcnet32_open()
2274 napi_enable_locked(&lp->napi); in pcnet32_open()
2277 lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff)); in pcnet32_open()
2278 lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16)); in pcnet32_open()
2280 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */ in pcnet32_open()
2281 lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); in pcnet32_open()
2285 if (lp->chip_version >= PCNET32_79C970A) { in pcnet32_open()
2288 mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT); in pcnet32_open()
2293 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) in pcnet32_open()
2299 lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL); in pcnet32_open()
2301 netif_printk(lp, ifup, KERN_DEBUG, dev, in pcnet32_open()
2304 (u32) (lp->init_dma_addr), in pcnet32_open()
2305 lp->a->read_csr(ioaddr, CSR0)); in pcnet32_open()
2307 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_open()
2320 lp->a->write_bcr(ioaddr, 20, 4); in pcnet32_open()
2323 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_open()
2344 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_purge_tx_ring() local
2347 for (i = 0; i < lp->tx_ring_size; i++) { in pcnet32_purge_tx_ring()
2348 lp->tx_ring[i].status = 0; /* CPU owns buffer */ in pcnet32_purge_tx_ring()
2350 if (lp->tx_skbuff[i]) { in pcnet32_purge_tx_ring()
2351 if (!dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[i])) in pcnet32_purge_tx_ring()
2352 dma_unmap_single(&lp->pci_dev->dev, in pcnet32_purge_tx_ring()
2353 lp->tx_dma_addr[i], in pcnet32_purge_tx_ring()
2354 lp->tx_skbuff[i]->len, in pcnet32_purge_tx_ring()
2356 dev_kfree_skb_any(lp->tx_skbuff[i]); in pcnet32_purge_tx_ring()
2358 lp->tx_skbuff[i] = NULL; in pcnet32_purge_tx_ring()
2359 lp->tx_dma_addr[i] = 0; in pcnet32_purge_tx_ring()
2366 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_init_ring() local
2369 lp->tx_full = 0; in pcnet32_init_ring()
2370 lp->cur_rx = lp->cur_tx = 0; in pcnet32_init_ring()
2371 lp->dirty_rx = lp->dirty_tx = 0; in pcnet32_init_ring()
2373 for (i = 0; i < lp->rx_ring_size; i++) { in pcnet32_init_ring()
2374 struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; in pcnet32_init_ring()
2376 lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB); in pcnet32_init_ring()
2377 rx_skbuff = lp->rx_skbuff[i]; in pcnet32_init_ring()
2380 netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n", in pcnet32_init_ring()
2388 if (lp->rx_dma_addr[i] == 0) { in pcnet32_init_ring()
2389 lp->rx_dma_addr[i] = in pcnet32_init_ring()
2390 dma_map_single(&lp->pci_dev->dev, rx_skbuff->data, in pcnet32_init_ring()
2392 if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) { in pcnet32_init_ring()
2394 netif_err(lp, drv, dev, in pcnet32_init_ring()
2400 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); in pcnet32_init_ring()
2401 lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); in pcnet32_init_ring()
2403 lp->rx_ring[i].status = cpu_to_le16(0x8000); in pcnet32_init_ring()
2407 for (i = 0; i < lp->tx_ring_size; i++) { in pcnet32_init_ring()
2408 lp->tx_ring[i].status = 0; /* CPU owns buffer */ in pcnet32_init_ring()
2410 lp->tx_ring[i].base = 0; in pcnet32_init_ring()
2411 lp->tx_dma_addr[i] = 0; in pcnet32_init_ring()
2414 lp->init_block->tlen_rlen = in pcnet32_init_ring()
2415 cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits); in pcnet32_init_ring()
2417 lp->init_block->phys_addr[i] = dev->dev_addr[i]; in pcnet32_init_ring()
2418 lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr); in pcnet32_init_ring()
2419 lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr); in pcnet32_init_ring()
2430 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_restart() local
2436 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP) in pcnet32_restart()
2440 netif_err(lp, drv, dev, "%s timed out waiting for stop\n", in pcnet32_restart()
2448 lp->a->write_csr(ioaddr, CSR0, CSR0_INIT); in pcnet32_restart()
2451 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON) in pcnet32_restart()
2454 lp->a->write_csr(ioaddr, CSR0, csr0_bits); in pcnet32_restart()
2459 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_tx_timeout() local
2462 spin_lock_irqsave(&lp->lock, flags); in pcnet32_tx_timeout()
2466 dev->name, lp->a->read_csr(ioaddr, CSR0)); in pcnet32_tx_timeout()
2467 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); in pcnet32_tx_timeout()
2469 if (netif_msg_tx_err(lp)) { in pcnet32_tx_timeout()
2473 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", in pcnet32_tx_timeout()
2474 lp->cur_rx); in pcnet32_tx_timeout()
2475 for (i = 0; i < lp->rx_ring_size; i++) in pcnet32_tx_timeout()
2477 le32_to_cpu(lp->rx_ring[i].base), in pcnet32_tx_timeout()
2478 (-le16_to_cpu(lp->rx_ring[i].buf_length)) & in pcnet32_tx_timeout()
2479 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), in pcnet32_tx_timeout()
2480 le16_to_cpu(lp->rx_ring[i].status)); in pcnet32_tx_timeout()
2481 for (i = 0; i < lp->tx_ring_size; i++) in pcnet32_tx_timeout()
2483 le32_to_cpu(lp->tx_ring[i].base), in pcnet32_tx_timeout()
2484 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, in pcnet32_tx_timeout()
2485 le32_to_cpu(lp->tx_ring[i].misc), in pcnet32_tx_timeout()
2486 le16_to_cpu(lp->tx_ring[i].status)); in pcnet32_tx_timeout()
2494 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_tx_timeout()
2500 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_start_xmit() local
2506 spin_lock_irqsave(&lp->lock, flags); in pcnet32_start_xmit()
2508 netif_printk(lp, tx_queued, KERN_DEBUG, dev, in pcnet32_start_xmit()
2510 __func__, lp->a->read_csr(ioaddr, CSR0)); in pcnet32_start_xmit()
2520 entry = lp->cur_tx & lp->tx_mod_mask; in pcnet32_start_xmit()
2525 lp->tx_ring[entry].length = cpu_to_le16(-skb->len); in pcnet32_start_xmit()
2527 lp->tx_ring[entry].misc = 0x00000000; in pcnet32_start_xmit()
2529 lp->tx_dma_addr[entry] = in pcnet32_start_xmit()
2530 dma_map_single(&lp->pci_dev->dev, skb->data, skb->len, in pcnet32_start_xmit()
2532 if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) { in pcnet32_start_xmit()
2537 lp->tx_skbuff[entry] = skb; in pcnet32_start_xmit()
2538 lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]); in pcnet32_start_xmit()
2540 lp->tx_ring[entry].status = cpu_to_le16(status); in pcnet32_start_xmit()
2542 lp->cur_tx++; in pcnet32_start_xmit()
2546 lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); in pcnet32_start_xmit()
2548 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { in pcnet32_start_xmit()
2549 lp->tx_full = 1; in pcnet32_start_xmit()
2553 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_start_xmit()
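The transmit path above maps the skb, fills the next ring entry, hands ownership to the chip, and then prods CSR0 to request a transmit poll. A compact sketch of that hand-off for a generic descriptor ring, reusing the my_priv layout from the earlier sketch; my_kick_tx() stands in for the pcnet32 CSR0 write (CSR0_INTEN | CSR0_TXPOLL) and is not a real API:

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *p = netdev_priv(dev);
	unsigned int entry;
	dma_addr_t mapping;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	entry = p->cur & (p->ring_size - 1);

	mapping = dma_map_single(p->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(p->dev, mapping)) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		goto out;
	}

	p->skb[entry] = skb;
	p->dma[entry] = mapping;
	p->ring[entry].base = cpu_to_le32(mapping);
	p->ring[entry].len = cpu_to_le16(-skb->len);	/* two's-complement length, pcnet32 style */
	wmb();		/* descriptor body must be visible before ownership flips */
	p->ring[entry].status = cpu_to_le16(MY_DESC_OWN);
	p->cur++;

	my_kick_tx(p);	/* placeholder: tell the NIC to look at the ring now */
out:
	spin_unlock_irqrestore(&p->lock, flags);
	return NETDEV_TX_OK;
}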
2562 struct pcnet32_private *lp; in pcnet32_interrupt() local
2568 lp = netdev_priv(dev); in pcnet32_interrupt()
2570 spin_lock(&lp->lock); in pcnet32_interrupt()
2572 csr0 = lp->a->read_csr(ioaddr, CSR0); in pcnet32_interrupt()
2577 lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f); in pcnet32_interrupt()
2579 netif_printk(lp, intr, KERN_DEBUG, dev, in pcnet32_interrupt()
2581 csr0, lp->a->read_csr(ioaddr, CSR0)); in pcnet32_interrupt()
2601 netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n", in pcnet32_interrupt()
2605 if (napi_schedule_prep(&lp->napi)) { in pcnet32_interrupt()
2608 val = lp->a->read_csr(ioaddr, CSR3); in pcnet32_interrupt()
2610 lp->a->write_csr(ioaddr, CSR3, val); in pcnet32_interrupt()
2612 __napi_schedule(&lp->napi); in pcnet32_interrupt()
2615 csr0 = lp->a->read_csr(ioaddr, CSR0); in pcnet32_interrupt()
2618 netif_printk(lp, intr, KERN_DEBUG, dev, in pcnet32_interrupt()
2620 lp->a->read_csr(ioaddr, CSR0)); in pcnet32_interrupt()
2622 spin_unlock(&lp->lock); in pcnet32_interrupt()
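The interrupt handler above acknowledges the CSR0 status bits, masks further RX/TX interrupts via CSR3, and defers the real work to NAPI with napi_schedule_prep()/__napi_schedule(). The generic shape of that handler is sketched below, again reusing my_priv; my_read_irq_status(), my_ack_irq() and my_mask_rx_tx_irqs() are placeholders for the CSR0/CSR3 accesses, not real functions:

#include <linux/interrupt.h>

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct my_priv *p = netdev_priv(dev);
	u32 status;

	spin_lock(&p->lock);
	status = my_read_irq_status(p);		/* CSR0 on pcnet32 */
	if (!status) {				/* not ours (shared IRQ line) */
		spin_unlock(&p->lock);
		return IRQ_NONE;
	}
	my_ack_irq(p, status);			/* write the bits back to clear them */

	if (napi_schedule_prep(&p->napi)) {
		my_mask_rx_tx_irqs(p);		/* e.g. set the mask bits in CSR3 */
		__napi_schedule(&p->napi);	/* the poll routine does rx/tx work */
	}
	spin_unlock(&p->lock);
	return IRQ_HANDLED;
}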
2630 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_close() local
2633 del_timer_sync(&lp->watchdog_timer); in pcnet32_close()
2636 napi_disable(&lp->napi); in pcnet32_close()
2638 spin_lock_irqsave(&lp->lock, flags); in pcnet32_close()
2640 dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); in pcnet32_close()
2642 netif_printk(lp, ifdown, KERN_DEBUG, dev, in pcnet32_close()
2644 lp->a->read_csr(ioaddr, CSR0)); in pcnet32_close()
2647 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); in pcnet32_close()
2653 lp->a->write_bcr(ioaddr, 20, 4); in pcnet32_close()
2655 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_close()
2659 spin_lock_irqsave(&lp->lock, flags); in pcnet32_close()
2664 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_close()
2671 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_get_stats() local
2675 spin_lock_irqsave(&lp->lock, flags); in pcnet32_get_stats()
2676 dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112); in pcnet32_get_stats()
2677 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_get_stats()
2685 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_load_multicast() local
2686 volatile struct pcnet32_init_block *ib = lp->init_block; in pcnet32_load_multicast()
2697 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff); in pcnet32_load_multicast()
2698 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff); in pcnet32_load_multicast()
2699 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff); in pcnet32_load_multicast()
2700 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff); in pcnet32_load_multicast()
2714 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i, in pcnet32_load_multicast()
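pcnet32_load_multicast() above builds the chip's 64-bit logical-address filter: in the all-multicast case it writes 0xffff into all four CSR filter words, otherwise it hashes each multicast address and sets one bit per hash value, where the index comes from the upper bits of the little-endian Ethernet CRC. A minimal sketch of that hash computation; the mcast_table layout here is illustrative, not the driver's exact data structure:

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Build a 64-bit multicast hash filter as four 16-bit little-endian words. */
static void my_build_mcast_filter(struct net_device *dev, __le16 mcast_table[4])
{
	struct netdev_hw_addr *ha;

	memset(mcast_table, 0, 4 * sizeof(__le16));
	netdev_for_each_mc_addr(ha, dev) {
		u32 crc = ether_crc_le(ETH_ALEN, ha->addr);

		crc >>= 26;		/* keep the top 6 bits: a value 0..63 */
		mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
	}
}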
2724 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_set_multicast_list() local
2727 spin_lock_irqsave(&lp->lock, flags); in pcnet32_set_multicast_list()
2729 csr15 = lp->a->read_csr(ioaddr, CSR15); in pcnet32_set_multicast_list()
2732 netif_info(lp, hw, dev, "Promiscuous mode enabled\n"); in pcnet32_set_multicast_list()
2733 lp->init_block->mode = in pcnet32_set_multicast_list()
2734 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << in pcnet32_set_multicast_list()
2736 lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000); in pcnet32_set_multicast_list()
2738 lp->init_block->mode = in pcnet32_set_multicast_list()
2739 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7); in pcnet32_set_multicast_list()
2740 lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff); in pcnet32_set_multicast_list()
2745 pcnet32_clr_suspend(lp, ioaddr); in pcnet32_set_multicast_list()
2747 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); in pcnet32_set_multicast_list()
2752 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_set_multicast_list()
2755 /* This routine assumes that the lp->lock is held */
2758 struct pcnet32_private *lp = netdev_priv(dev); in mdio_read() local
2762 if (!lp->mii) in mdio_read()
2765 lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); in mdio_read()
2766 val_out = lp->a->read_bcr(ioaddr, 34); in mdio_read()
2771 /* This routine assumes that the lp->lock is held */
2774 struct pcnet32_private *lp = netdev_priv(dev); in mdio_write() local
2777 if (!lp->mii) in mdio_write()
2780 lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); in mdio_write()
2781 lp->a->write_bcr(ioaddr, 34, val); in mdio_write()
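mdio_read()/mdio_write() above show the indirect MII access scheme used by this chip: BCR33 selects the PHY and register (phy_id in bits 5-9, register number in bits 0-4) and BCR34 is the data window, with lp->lock held by the caller. A sketch of the same two helpers with the register accessors left abstract; my_read_bcr()/my_write_bcr() are placeholders for lp->a->read_bcr/write_bcr:

/* Caller holds the driver lock, as the comments above the originals note. */
static int my_mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct my_priv *p = netdev_priv(dev);

	/* BCR33 = (phy_id[4:0] << 5) | reg_num[4:0], BCR34 = data window */
	my_write_bcr(p, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
	return my_read_bcr(p, 34);
}

static void my_mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
	struct my_priv *p = netdev_priv(dev);

	my_write_bcr(p, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
	my_write_bcr(p, 34, val);
}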
2786 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_ioctl() local
2791 if (lp->mii) { in pcnet32_ioctl()
2792 spin_lock_irqsave(&lp->lock, flags); in pcnet32_ioctl()
2793 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); in pcnet32_ioctl()
2794 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_ioctl()
2804 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_check_otherphy() local
2805 struct mii_if_info mii = lp->mii_if; in pcnet32_check_otherphy()
2810 if (i == lp->mii_if.phy_id) in pcnet32_check_otherphy()
2812 if (lp->phymask & (1 << i)) { in pcnet32_check_otherphy()
2816 netif_info(lp, link, dev, "Using PHY number %d\n", in pcnet32_check_otherphy()
2821 mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); in pcnet32_check_otherphy()
2822 mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, in pcnet32_check_otherphy()
2831 lp->mii_if.phy_id = i; in pcnet32_check_otherphy()
2844 * Caller is assumed to hold and release the lp->lock.
2849 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_check_media() local
2854 if (lp->mii) { in pcnet32_check_media()
2855 curr_link = mii_link_ok(&lp->mii_if); in pcnet32_check_media()
2856 } else if (lp->chip_version == PCNET32_79C970A) { in pcnet32_check_media()
2859 if (!lp->autoneg && lp->port_tp) in pcnet32_check_media()
2860 curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); in pcnet32_check_media()
2865 curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0); in pcnet32_check_media()
2870 netif_info(lp, link, dev, "link down\n"); in pcnet32_check_media()
2872 if (lp->phycount > 1) { in pcnet32_check_media()
2877 if (lp->mii) { in pcnet32_check_media()
2878 if (netif_msg_link(lp)) { in pcnet32_check_media()
2881 mii_ethtool_gset(&lp->mii_if, &ecmd); in pcnet32_check_media()
2887 bcr9 = lp->a->read_bcr(dev->base_addr, 9); in pcnet32_check_media()
2888 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { in pcnet32_check_media()
2889 if (lp->mii_if.full_duplex) in pcnet32_check_media()
2893 lp->a->write_bcr(dev->base_addr, 9, bcr9); in pcnet32_check_media()
2896 netif_info(lp, link, dev, "link up\n"); in pcnet32_check_media()
2908 struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer); in pcnet32_watchdog() local
2909 struct net_device *dev = lp->dev; in pcnet32_watchdog()
2913 spin_lock_irqsave(&lp->lock, flags); in pcnet32_watchdog()
2915 spin_unlock_irqrestore(&lp->lock, flags); in pcnet32_watchdog()
2917 mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT)); in pcnet32_watchdog()
2949 struct pcnet32_private *lp = netdev_priv(dev); in pcnet32_remove_one() local
2954 dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block), in pcnet32_remove_one()
2955 lp->init_block, lp->init_dma_addr); in pcnet32_remove_one()
3032 struct pcnet32_private *lp = netdev_priv(pcnet32_dev); in pcnet32_cleanup_module() local
3033 next_dev = lp->next; in pcnet32_cleanup_module()
3037 dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block), in pcnet32_cleanup_module()
3038 lp->init_block, lp->init_dma_addr); in pcnet32_cleanup_module()