Lines matching: broken, prefetch, cmd

1 // SPDX-License-Identifier: GPL-1.0+
2 /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
9 3 primary sources of the mess --
27 03/02/2000 changes for better/correct(?) cache-flushing (deller)
34 This driver is for the Apricot 82596 bus-master interface
46 non-cached page, so we can run on 68060 in copyback mode.
53 Most of my modifications relate to the braindead big-endian
55 'big-endian' mode, it thinks a 32 bit value of 0x12345678
82 #include <linux/dma-mapping.h>
114 * the following commands are available (p5-18). The 32-bit port command
115 * must be word-swapped with the most significant word written first.
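In practice the note above means a 32-bit PORT command is delivered as two 16-bit writes, most significant half first. A minimal sketch of that protocol, assuming a memory-mapped 16-bit port register; the driver's real mpu_port() is platform-specific and not shown in this listing:

	#include <linux/io.h>

	/* Hypothetical helper: deliver a 32-bit PORT command as two
	 * 16-bit writes, most significant word first, as required. */
	static void port_cmd_sketch(void __iomem *port, u32 cmd)
	{
		writew(cmd >> 16, port);	/* most significant word */
		writew(cmd & 0xffff, port);	/* then least significant */
	}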
138 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
139 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
200 struct i596_cmd cmd;
214 struct i596_cmd cmd;
220 struct i596_cmd cmd;
226 struct i596_cmd cmd;
231 struct i596_cmd cmd;
237 unsigned short cmd;
276 u32 cmd;
333 0x8E, /* length, prefetch on */
352 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
361 static int max_cmd_backlog = TX_RING_SIZE-1;
369 return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
376 dma_sync_single_for_device(ndev->dev.parent,
384 dma_sync_single_for_cpu(ndev->dev.parent,
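The two truncated calls above are the bodies of this driver's sync helpers: thin wrappers that translate a virtual pointer inside the shared i596_dma block into its bus address and sync it. Reassembled under that assumption; the length and direction arguments are elided by the listing, and DMA_BIDIRECTIONAL matches the non-coherent build of this code:

	static inline void dma_sync_dev(struct net_device *ndev,
					volatile void *addr, int len)
	{
		/* Flush CPU writes so the 82596 sees current descriptors. */
		dma_sync_single_for_device(ndev->dev.parent,
				virt_to_dma(netdev_priv(ndev), addr), len,
				DMA_BIDIRECTIONAL);
	}

	static inline void dma_sync_cpu(struct net_device *ndev,
					volatile void *addr, int len)
	{
		/* Drop stale cachelines before the CPU reads device writes. */
		dma_sync_single_for_cpu(ndev->dev.parent,
				virt_to_dma(netdev_priv(ndev), addr), len,
				DMA_BIDIRECTIONAL);
	}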
401 dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
402 while (--delcnt && dma->iscp.stat) {
404 dma_sync_cpu(dev, &(dma->iscp), sizeof(struct i596_iscp));
408 dev->name, str, SWAP16(dma->iscp.stat));
409 return -1;
417 dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
418 while (--delcnt && dma->scb.command) {
420 dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
423 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
424 dev->name, str,
425 SWAP16(dma->scb.status),
426 SWAP16(dma->scb.command));
427 return -1;
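Both wait_istat() and wait_cmd() are the same poll-with-budget pattern: sync the descriptor for the CPU, spin until the chip clears the field or the budget runs out, then report a timeout. Reassembled from the fragments above; the udelay(10) poll interval is an assumption here:

	static int wait_cmd(struct net_device *dev, struct i596_dma *dma,
			    int delcnt, const char *str)
	{
		dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
		while (--delcnt && dma->scb.command) {
			udelay(10);	/* poll interval: an assumption */
			dma_sync_cpu(dev, &(dma->scb), sizeof(struct i596_scb));
		}
		if (!delcnt) {
			printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
			       dev->name, str,
			       SWAP16(dma->scb.status), SWAP16(dma->scb.command));
			return -1;
		}
		return 0;
	}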
436 struct i596_dma *dma = lp->dma;
437 struct i596_cmd *cmd;
442 &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
444 &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
446 " .cmd = %08x, .rfd = %08x\n",
447 &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
448 SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
451 SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
452 SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
453 SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
454 cmd = lp->cmd_head;
455 while (cmd != NULL) {
457 "cmd at %p, .status = %04x, .command = %04x,"
459 cmd, SWAP16(cmd->status), SWAP16(cmd->command),
460 SWAP32(cmd->b_next));
461 cmd = cmd->v_next;
463 rfd = lp->rfd_head;
467 " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
469 rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
470 SWAP32(rfd->b_next), SWAP32(rfd->rbd),
471 SWAP16(rfd->count));
472 rfd = rfd->v_next;
473 } while (rfd != lp->rfd_head);
474 rbd = lp->rbd_head;
480 rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
481 SWAP32(rbd->b_data), SWAP16(rbd->size));
482 rbd = rbd->v_next;
483 } while (rbd != lp->rbd_head);
490 struct i596_dma *dma = lp->dma;
497 for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
503 return -1;
504 dma_addr = dma_map_single(dev->dev.parent, skb->data,
506 rbd->v_next = rbd+1;
507 rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
508 rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
509 rbd->skb = skb;
510 rbd->v_data = skb->data;
511 rbd->b_data = SWAP32(dma_addr);
512 rbd->size = SWAP16(PKT_BUF_SZ);
514 lp->rbd_head = dma->rbds;
515 rbd = dma->rbds + rx_ring_size - 1;
516 rbd->v_next = dma->rbds;
517 rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
521 for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
522 rfd->rbd = I596_NULL;
523 rfd->v_next = rfd+1;
524 rfd->v_prev = rfd-1;
525 rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
526 rfd->cmd = SWAP16(CMD_FLEX);
528 lp->rfd_head = dma->rfds;
529 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
530 rfd = dma->rfds;
531 rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
532 rfd->v_prev = dma->rfds + rx_ring_size - 1;
533 rfd = dma->rfds + rx_ring_size - 1;
534 rfd->v_next = dma->rfds;
535 rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
536 rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
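Each ring entry above carries two views of every link: a virtual pointer (v_next) for the CPU and a byte-swapped bus address (b_next) for the 82596, which is why every device-visible field goes through SWAP16/SWAP32. A condensed sketch of wiring one RBD, with the skb allocation (elided by the listing) filled in as a plausible netdev_alloc_skb() call:

	struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
	dma_addr_t dma_addr;

	if (!skb)
		return -1;
	dma_addr = dma_map_single(dev->dev.parent, skb->data,
				  PKT_BUF_SZ, DMA_FROM_DEVICE);
	rbd->v_next = rbd + 1;				/* CPU-side link */
	rbd->b_next = SWAP32(virt_to_dma(lp, rbd + 1));	/* device-side link */
	rbd->skb    = skb;
	rbd->v_data = skb->data;
	rbd->b_data = SWAP32(dma_addr);
	rbd->size   = SWAP16(PKT_BUF_SZ);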
548 for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
549 if (rbd->skb == NULL)
551 dma_unmap_single(dev->dev.parent,
552 (dma_addr_t)SWAP32(rbd->b_data),
554 dev_kfree_skb(rbd->skb);
562 struct i596_dma *dma = lp->dma;
568 dma->rfds[i].rbd = I596_NULL;
569 dma->rfds[i].cmd = SWAP16(CMD_FLEX);
571 dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
572 lp->rfd_head = dma->rfds;
573 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
574 lp->rbd_head = dma->rbds;
575 dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
584 struct i596_dma *dma = lp->dma;
588 udelay(100); /* Wait 100us - seems to help */
592 lp->last_cmd = jiffies;
594 dma->scp.sysbus = SYSBUS;
595 dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
596 dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
597 dma->iscp.stat = SWAP32(ISCP_BUSY);
598 lp->cmd_backlog = 0;
600 lp->cmd_head = NULL;
601 dma->scb.cmd = I596_NULL;
603 DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
605 dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
606 dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));
607 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
609 mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
615 dev->name));
617 if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
618 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
625 dma->scb.command = 0;
626 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
629 "%s: queuing CmdConfigure\n", dev->name));
630 memcpy(dma->cf_cmd.i596_config, init_setup, 14);
631 dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
632 dma_sync_dev(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
633 i596_add_cmd(dev, &dma->cf_cmd.cmd);
635 DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
636 memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
637 dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
638 dma_sync_dev(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
639 i596_add_cmd(dev, &dma->sa_cmd.cmd);
641 DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
642 dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
643 dma_sync_dev(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
644 i596_add_cmd(dev, &dma->tdr_cmd.cmd);
646 spin_lock_irqsave (&lp->lock, flags);
649 spin_unlock_irqrestore (&lp->lock, flags);
652 DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
653 dma->scb.command = SWAP16(RX_START);
654 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
655 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
659 spin_unlock_irqrestore (&lp->lock, flags);
663 "%s: Receive unit started OK\n", dev->name));
667 free_irq(dev->irq, dev);
669 printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
671 return -1;
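The fragments above compress the whole bring-up handshake: point the SCP at the ISCP, the ISCP at the SCB, mark the ISCP busy, hand the SCP's bus address to the chip through the PORT interface, strobe channel attention, and wait for the chip to clear the busy flag. A condensed sketch; ca() is assumed to be the platform's channel-attention hook, like mpu_port(), and the iteration budget is illustrative:

	dma->scp.sysbus = SYSBUS;
	dma->scp.iscp   = SWAP32(virt_to_dma(lp, &(dma->iscp)));
	dma->iscp.scb   = SWAP32(virt_to_dma(lp, &(dma->scb)));
	dma->iscp.stat  = SWAP32(ISCP_BUSY);
	dma_sync_dev(dev, &(dma->scp), sizeof(struct i596_scp));
	dma_sync_dev(dev, &(dma->iscp), sizeof(struct i596_iscp));

	mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
	ca(dev);				/* channel attention */
	if (wait_istat(dev, dma, 1000, "initialization timed out"))
		goto failed;			/* free_irq + return -1 */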
684 lp->rfd_head, lp->rbd_head));
687 rfd = lp->rfd_head; /* Ref next frame to check */
690 while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
691 if (rfd->rbd == I596_NULL)
693 else if (rfd->rbd == lp->rbd_head->b_addr) {
694 rbd = lp->rbd_head;
697 printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
703 rfd, rfd->rbd, rfd->stat));
705 if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
707 int pkt_len = SWAP16(rbd->count) & 0x3fff;
708 struct sk_buff *skb = rbd->skb;
711 DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
722 dma_unmap_single(dev->dev.parent,
723 (dma_addr_t)SWAP32(rbd->b_data),
736 rbd->skb = newskb;
737 dma_addr = dma_map_single(dev->dev.parent,
738 newskb->data,
741 rbd->v_data = newskb->data;
742 rbd->b_data = SWAP32(dma_addr);
750 dev->stats.rx_dropped++;
754 dma_sync_single_for_cpu(dev->dev.parent,
755 (dma_addr_t)SWAP32(rbd->b_data),
757 skb_put_data(skb, rbd->v_data,
759 dma_sync_single_for_device(dev->dev.parent,
760 (dma_addr_t)SWAP32(rbd->b_data),
763 skb->len = pkt_len;
764 skb->protocol = eth_type_trans(skb, dev);
766 dev->stats.rx_packets++;
767 dev->stats.rx_bytes += pkt_len;
772 dev->name, rfd->stat));
773 dev->stats.rx_errors++;
774 if (rfd->stat & SWAP16(0x0100))
775 dev->stats.collisions++;
776 if (rfd->stat & SWAP16(0x8000))
777 dev->stats.rx_length_errors++;
778 if (rfd->stat & SWAP16(0x0001))
779 dev->stats.rx_over_errors++;
780 if (rfd->stat & SWAP16(0x0002))
781 dev->stats.rx_fifo_errors++;
782 if (rfd->stat & SWAP16(0x0004))
783 dev->stats.rx_frame_errors++;
784 if (rfd->stat & SWAP16(0x0008))
785 dev->stats.rx_crc_errors++;
786 if (rfd->stat & SWAP16(0x0010))
787 dev->stats.rx_length_errors++;
792 if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
793 rbd->count = 0;
794 lp->rbd_head = rbd->v_next;
800 rfd->rbd = I596_NULL;
801 rfd->stat = 0;
802 rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
803 rfd->count = 0;
807 lp->dma->scb.rfd = rfd->b_next;
808 lp->rfd_head = rfd->v_next;
811 /* Remove end-of-list from old end descriptor */
813 rfd->v_prev->cmd = SWAP16(CMD_FLEX);
814 dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));
815 rfd = lp->rfd_head;
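The recycling step keeps exactly one descriptor marked CMD_EOL: the just-consumed RFD is reset and becomes the new list tail, the SCB's rfd pointer advances, and only then is EOL cleared on the previous tail, so the 82596 can never run past the end of the ring. In sketch form, from the fragments above:

	rfd->rbd   = I596_NULL;
	rfd->stat  = 0;
	rfd->cmd   = SWAP16(CMD_EOL | CMD_FLEX);	/* new end of list */
	rfd->count = 0;

	lp->dma->scb.rfd = rfd->b_next;			/* advance SCB pointer */
	lp->rfd_head     = rfd->v_next;
	dma_sync_dev(dev, rfd, sizeof(struct i596_rfd));

	rfd->v_prev->cmd = SWAP16(CMD_FLEX);		/* old tail: clear EOL */
	dma_sync_dev(dev, rfd->v_prev, sizeof(struct i596_rfd));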
829 while (lp->cmd_head != NULL) {
830 ptr = lp->cmd_head;
831 lp->cmd_head = ptr->v_next;
832 lp->cmd_backlog--;
834 switch (SWAP16(ptr->command) & 0x7) {
838 struct sk_buff *skb = tx_cmd->skb;
839 dma_unmap_single(dev->dev.parent,
840 tx_cmd->dma_addr,
841 skb->len, DMA_TO_DEVICE);
845 dev->stats.tx_errors++;
846 dev->stats.tx_aborted_errors++;
848 ptr->v_next = NULL;
849 ptr->b_next = I596_NULL;
850 tx_cmd->cmd.command = 0; /* Mark as free */
854 ptr->v_next = NULL;
855 ptr->b_next = I596_NULL;
860 wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
861 lp->dma->scb.cmd = I596_NULL;
862 dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
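i596_cleanup_cmd() drains the same queue on close and reset: any CmdTx entry still pending has its buffer unmapped and freed, and is accounted as an aborted transmit. The TX branch, reassembled from the fragments; the dev_kfree_skb() call is an assumption since the listing elides it:

	case CmdTx: {
		struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
		struct sk_buff *skb = tx_cmd->skb;

		dma_unmap_single(dev->dev.parent, tx_cmd->dma_addr,
				 skb->len, DMA_TO_DEVICE);
		dev_kfree_skb(skb);		/* assumption: freed here */

		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;

		ptr->v_next = NULL;
		ptr->b_next = I596_NULL;
		tx_cmd->cmd.command = 0;	/* mark the slot free */
		break;
	}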
872 spin_lock_irqsave (&lp->lock, flags);
874 wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
879 lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
880 dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
884 wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
885 spin_unlock_irqrestore (&lp->lock, flags);
895 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
898 struct i596_dma *dma = lp->dma;
902 lp->cmd_head));
904 cmd->status = 0;
905 cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
906 cmd->v_next = NULL;
907 cmd->b_next = I596_NULL;
908 dma_sync_dev(dev, cmd, sizeof(struct i596_cmd));
910 spin_lock_irqsave (&lp->lock, flags);
912 if (lp->cmd_head != NULL) {
913 lp->cmd_tail->v_next = cmd;
914 lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
915 dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
917 lp->cmd_head = cmd;
919 dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
920 dma->scb.command = SWAP16(CUC_START);
921 dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
924 lp->cmd_tail = cmd;
925 lp->cmd_backlog++;
927 spin_unlock_irqrestore (&lp->lock, flags);
929 if (lp->cmd_backlog > max_cmd_backlog) {
930 unsigned long tickssofar = jiffies - lp->last_cmd;
937 dev->name);
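Putting the i596_add_cmd() fragments together: every command is terminated with CMD_EOL | CMD_INTR, then either linked behind the current tail or, if the queue is empty, handed straight to the SCB with CUC_START. Under the spinlock; the ca() strobe in the empty-queue case is an assumption, as the listing elides it:

	if (lp->cmd_head != NULL) {	/* append behind the current tail */
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
		dma_sync_dev(dev, lp->cmd_tail, sizeof(struct i596_cmd));
	} else {			/* queue was empty: start the CU */
		lp->cmd_head = cmd;
		dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
		dma->scb.command = SWAP16(CUC_START);
		dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
		ca(dev);		/* assumption: channel attention */
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;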
947 "%s: i596_open() irq %d.\n", dev->name, dev->irq)); in i596_open()
950 printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name); in i596_open()
951 return -EAGAIN; in i596_open()
954 printk(KERN_ERR "%s: Failed to init memory\n", dev->name); in i596_open()
963 return -EAGAIN; in i596_open()
973 dev->name));
975 dev->stats.tx_errors++;
978 if (lp->last_restart == dev->stats.tx_packets) {
985 lp->dma->scb.command = SWAP16(CUC_START | RX_START);
986 dma_sync_dev(dev, &(lp->dma->scb), sizeof(struct i596_scb));
988 lp->last_restart = dev->stats.tx_packets;
1001 short length = skb->len;
1005 dev->name, skb->len, skb->data));
1015 tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
1016 tbd = lp->dma->tbds + lp->next_tx_cmd;
1018 if (tx_cmd->cmd.command) {
1021 dev->name));
1022 dev->stats.tx_dropped++;
1026 if (++lp->next_tx_cmd == TX_RING_SIZE)
1027 lp->next_tx_cmd = 0;
1028 tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
1029 tbd->next = I596_NULL;
1031 tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
1032 tx_cmd->skb = skb;
1034 tx_cmd->pad = 0;
1035 tx_cmd->size = 0;
1036 tbd->pad = 0;
1037 tbd->size = SWAP16(EOF | length);
1039 tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
1040 skb->len, DMA_TO_DEVICE);
1041 tbd->data = SWAP32(tx_cmd->dma_addr);
1043 DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
1046 i596_add_cmd(dev, &tx_cmd->cmd);
1048 dev->stats.tx_packets++;
1049 dev->stats.tx_bytes += length;
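One detail worth noting in the fragments: the TX path maps skb->data without checking the result. This driver predates that convention, but a modern driver would hedge the mapping with dma_mapping_error(). A sketch of the safer pattern, not present in this source:

	dma_addr_t addr = dma_map_single(dev->dev.parent, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev.parent, addr)) {
		dev_kfree_skb_any(skb);		/* drop rather than retry */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}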
1059 printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
1089 if (!dev->base_addr || !dev->irq)
1090 return -ENODEV;
1092 dev->netdev_ops = &i596_netdev_ops;
1093 dev->watchdog_timeo = TX_TIMEOUT;
1095 memset(lp->dma, 0, sizeof(struct i596_dma));
1096 lp->dma->scb.command = 0;
1097 lp->dma->scb.cmd = I596_NULL;
1098 lp->dma->scb.rfd = I596_NULL;
1099 spin_lock_init(&lp->lock);
1101 dma_sync_dev(dev, lp->dma, sizeof(struct i596_dma));
1108 dev->name, dev->base_addr, dev->dev_addr,
1109 dev->irq));
1111 "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
1112 dev->name, lp->dma, (int)sizeof(struct i596_dma),
1113 &lp->dma->scb));
1121 disable_irq(dev->irq);
1122 i596_interrupt(dev->irq, dev);
1123 enable_irq(dev->irq);
1135 dma = lp->dma;
1137 spin_lock (&lp->lock);
1140 status = SWAP16(dma->scb.status);
1144 dev->name, dev->irq, status));
1151 dev->name));
1152 spin_unlock (&lp->lock);
1163 dev->name));
1168 dev->name, status & 0x0700));
1170 while (lp->cmd_head != NULL) {
1171 dma_sync_cpu(dev, lp->cmd_head, sizeof(struct i596_cmd));
1172 if (!(lp->cmd_head->status & SWAP16(STAT_C)))
1175 ptr = lp->cmd_head;
1179 "cmd_head->status = %04x, ->command = %04x\n",
1180 SWAP16(lp->cmd_head->status),
1181 SWAP16(lp->cmd_head->command)));
1182 lp->cmd_head = ptr->v_next;
1183 lp->cmd_backlog--;
1185 switch (SWAP16(ptr->command) & 0x7) {
1189 struct sk_buff *skb = tx_cmd->skb;
1191 if (ptr->status & SWAP16(STAT_OK)) {
1193 print_eth(skb->data, "tx-done"));
1195 dev->stats.tx_errors++;
1196 if (ptr->status & SWAP16(0x0020))
1197 dev->stats.collisions++;
1198 if (!(ptr->status & SWAP16(0x0040)))
1199 dev->stats.tx_heartbeat_errors++;
1200 if (ptr->status & SWAP16(0x0400))
1201 dev->stats.tx_carrier_errors++;
1202 if (ptr->status & SWAP16(0x0800))
1203 dev->stats.collisions++;
1204 if (ptr->status & SWAP16(0x1000))
1205 dev->stats.tx_aborted_errors++;
1207 dma_unmap_single(dev->dev.parent,
1208 tx_cmd->dma_addr,
1209 skb->len, DMA_TO_DEVICE);
1212 tx_cmd->cmd.command = 0; /* Mark free */
1217 unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
1222 dev->name));
1227 dev->name);
1231 dev->name);
1235 dev->name);
1239 dev->name, status & 0x07ff));
1248 ptr->command = 0;
1251 ptr->v_next = NULL;
1252 ptr->b_next = I596_NULL;
1254 lp->last_cmd = jiffies;
1259 * only add to the cmd queue when the CU is stopped.
1261 ptr = lp->cmd_head;
1262 while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
1265 ptr->command &= SWAP16(0x1fff);
1266 ptr = ptr->v_next;
1270 if (lp->cmd_head != NULL)
1272 dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
1273 dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
1280 dev->name));
1282 /* Only RX_START if stopped - RGH 07-07-96 */
1288 dev->name, status));
1290 dev->stats.rx_errors++;
1291 dev->stats.rx_fifo_errors++;
1297 dma->scb.command = SWAP16(ack_cmd);
1298 dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
1307 DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
1309 spin_unlock (&lp->lock);
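After servicing command completions and receive events, the handler acknowledges the status bits it consumed by writing them back through scb.command and strobing channel attention; wait_cmd() first makes sure the chip has accepted any previous SCB command. Condensed from the fragments; the wait string and budget are illustrative:

	wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
	dma->scb.command = SWAP16(ack_cmd);	/* ack STAT_CX/STAT_FR/... */
	dma_sync_dev(dev, &dma->scb, sizeof(struct i596_scb));
	ca(dev);				/* channel attention */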
1323 dev->name, SWAP16(lp->dma->scb.status)));
1325 spin_lock_irqsave(&lp->lock, flags);
1327 wait_cmd(dev, lp->dma, 100, "close1 timed out");
1328 lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
1329 dma_sync_dev(dev, &lp->dma->scb, sizeof(struct i596_scb));
1333 wait_cmd(dev, lp->dma, 100, "close2 timed out");
1334 spin_unlock_irqrestore(&lp->lock, flags);
1338 free_irq(dev->irq, dev);
1351 struct i596_dma *dma = lp->dma;
1357 dev->name, netdev_mc_count(dev),
1358 dev->flags & IFF_PROMISC ? "ON" : "OFF",
1359 dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1361 if ((dev->flags & IFF_PROMISC) &&
1362 !(dma->cf_cmd.i596_config[8] & 0x01)) {
1363 dma->cf_cmd.i596_config[8] |= 0x01;
1366 if (!(dev->flags & IFF_PROMISC) &&
1367 (dma->cf_cmd.i596_config[8] & 0x01)) {
1368 dma->cf_cmd.i596_config[8] &= ~0x01;
1371 if ((dev->flags & IFF_ALLMULTI) &&
1372 (dma->cf_cmd.i596_config[11] & 0x20)) {
1373 dma->cf_cmd.i596_config[11] &= ~0x20;
1376 if (!(dev->flags & IFF_ALLMULTI) &&
1377 !(dma->cf_cmd.i596_config[11] & 0x20)) {
1378 dma->cf_cmd.i596_config[11] |= 0x20;
1382 if (dma->cf_cmd.cmd.command)
1385 dev->name);
1387 dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
1388 dma_sync_dev(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
1389 i596_add_cmd(dev, &dma->cf_cmd.cmd);
1397 dev->name, cnt);
1403 struct mc_cmd *cmd;
1405 cmd = &dma->mc_cmd;
1406 cmd->cmd.command = SWAP16(CmdMulticastList);
1407 cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
1408 cp = cmd->mc_addrs;
1410 if (!cnt--)
1412 memcpy(cp, ha->addr, ETH_ALEN);
1417 dev->name, cp));
1420 dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
1421 i596_add_cmd(dev, &cmd->cmd);
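The multicast path builds a CmdMulticastList whose payload is just the concatenated station addresses, with mc_cnt holding the byte count (6 bytes, i.e. ETH_ALEN, per address). Reassembled from the fragments; cnt is the bounded address count computed earlier in set_multicast_list():

	struct netdev_hw_addr *ha;
	unsigned char *cp;

	cmd = &dma->mc_cmd;
	cmd->cmd.command = SWAP16(CmdMulticastList);
	cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);	/* 6 == ETH_ALEN */
	cp = cmd->mc_addrs;
	netdev_for_each_mc_addr(ha, dev) {
		if (!cnt--)
			break;
		memcpy(cp, ha->addr, ETH_ALEN);
		cp += ETH_ALEN;
	}
	dma_sync_dev(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
	i596_add_cmd(dev, &cmd->cmd);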