Lines Matching "broken", "prefetch", "cmd"
1 // SPDX-License-Identifier: GPL-1.0+
6 This driver is for the Apricot 82596 bus-master interface
18 non-cached page, so we can run on 68060 in copyback mode.
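A minimal sketch of what that comment refers to, modeled on the i82596_probe() code matched further down; it only restates how one page is obtained and marked non-cached, and is not additional driver code:

	/* Sketch only, following i82596_probe() below: allocate one page for
	 * the shared i82596 descriptors and make it non-cached, so a 68060
	 * can keep its data cache in copyback mode. */
	dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
	if (dev->mem_start)
		kernel_set_cachemode((void *)(dev->mem_start), 4096,
				     IOMAP_NOCACHE_SER);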
25 Most of my modifications relate to the braindead big-endian
27 'big-endian' mode, it thinks a 32 bit value of 0x12345678
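For illustration, a hedged sketch of the 16-bit word swap that comment describes; the helper name wswap32 is hypothetical, and the driver's own WSWAP* macros perform the equivalent conversion:

	/* Sketch only: swap the two 16-bit halves of a 32-bit value, so
	 * 0x12345678 becomes 0x56781234 as described above.  wswap32 is a
	 * hypothetical name; the driver uses its WSWAP* macros for this. */
	static inline u32 wswap32(u32 x)
	{
		return (x << 16) | (x >> 16);
	}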
140 * the following commands are available (p5-18). The 32-bit port command
141 * must be word-swapped with the most significant word written first.
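A hedged sketch of that write ordering, assuming a placeholder 16-bit PORT register pointer; the driver's real register accesses are in MPU_PORT() further down:

	/* Sketch only: issue a 32-bit PORT command as two 16-bit writes,
	 * most significant word first, as the comment above requires.
	 * port_reg is a placeholder, not a register defined by this driver. */
	static void port_command(volatile u16 *port_reg, u32 cmd)
	{
		*port_reg = cmd >> 16;		/* most significant word first */
		*port_reg = cmd & 0xffff;	/* then the least significant word */
	}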
172 #define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
173 #define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
233 struct i596_cmd cmd; member
241 struct i596_cmd cmd; member
247 struct i596_cmd cmd; member
253 struct i596_cmd cmd; member
258 struct i596_cmd cmd; member
264 unsigned short cmd; member
292 struct i596_cmd *cmd; member
341 0x8E, /* length, prefetch on */
364 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
371 static int max_cmd_backlog = TX_RING_SIZE-1;
378 ((struct i596_reg *) dev->base_addr)->ca = 1; in CA()
385 i = *(volatile u32 *) (dev->base_addr); in CA()
395 struct i596_reg *p = (struct i596_reg *) (dev->base_addr); in MPU_PORT()
396 p->porthi = ((c) | (u32) (x)) & 0xffff; in MPU_PORT()
397 p->portlo = ((c) | (u32) (x)) >> 16; in MPU_PORT()
404 *(volatile u32 *) dev->base_addr = v; in MPU_PORT()
406 *(volatile u32 *) dev->base_addr = v; in MPU_PORT()
414 while (--delcnt && lp->iscp.stat) in wait_istat()
417 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n", in wait_istat()
418 dev->name, str, lp->scb.status, lp->scb.command); in wait_istat()
419 return -1; in wait_istat()
428 while (--delcnt && lp->scb.command) in wait_cmd()
431 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n", in wait_cmd()
432 dev->name, str, lp->scb.status, lp->scb.command); in wait_cmd()
433 return -1; in wait_cmd()
440 static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str) in wait_cfg() argument
442 volatile struct i596_cmd *c = cmd; in wait_cfg()
444 while (--delcnt && c->command) in wait_cfg()
447 printk(KERN_ERR "%s: %s.\n", dev->name, str); in wait_cfg()
448 return -1; in wait_cfg()
457 struct i596_private *lp = dev->ml_priv; in i596_display_data()
458 struct i596_cmd *cmd; in i596_display_data() local
463 &lp->scp, lp->scp.sysbus, lp->scp.iscp); in i596_display_data()
465 &lp->iscp, lp->iscp.stat, lp->iscp.scb); in i596_display_data()
467 " .cmd = %p, .rfd = %p\n", in i596_display_data()
468 &lp->scb, lp->scb.status, lp->scb.command, in i596_display_data()
469 lp->scb.cmd, lp->scb.rfd); in i596_display_data()
472 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err, in i596_display_data()
473 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err); in i596_display_data()
474 cmd = lp->cmd_head; in i596_display_data()
475 while (cmd != I596_NULL) { in i596_display_data()
476 printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n", in i596_display_data()
477 cmd, cmd->status, cmd->command, cmd->b_next); in i596_display_data()
478 cmd = cmd->v_next; in i596_display_data()
480 rfd = lp->rfd_head; in i596_display_data()
483 printk(KERN_ERR " %p .stat %04x, .cmd %04x, b_next %p, rbd %p," in i596_display_data()
485 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd, in i596_display_data()
486 rfd->count); in i596_display_data()
487 rfd = rfd->v_next; in i596_display_data()
488 } while (rfd != lp->rfd_head); in i596_display_data()
489 rbd = lp->rbd_head; in i596_display_data()
493 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size); in i596_display_data()
494 rbd = rbd->v_next; in i596_display_data()
495 } while (rbd != lp->rbd_head); in i596_display_data()
519 printk(KERN_ERR "%s: Error interrupt\n", dev->name); in i596_error()
527 struct i596_private *lp = dev->ml_priv; in remove_rx_bufs()
531 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { in remove_rx_bufs()
532 if (rbd->skb == NULL) in remove_rx_bufs()
534 dev_kfree_skb(rbd->skb); in remove_rx_bufs()
535 rbd->skb = NULL; in remove_rx_bufs()
541 struct i596_private *lp = dev->ml_priv; in init_rx_bufs()
548 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { in init_rx_bufs()
553 return -ENOMEM; in init_rx_bufs()
556 rbd->v_next = rbd+1; in init_rx_bufs()
557 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1)); in init_rx_bufs()
558 rbd->b_addr = WSWAPrbd(virt_to_bus(rbd)); in init_rx_bufs()
559 rbd->skb = skb; in init_rx_bufs()
560 rbd->v_data = skb->data; in init_rx_bufs()
561 rbd->b_data = WSWAPchar(virt_to_bus(skb->data)); in init_rx_bufs()
562 rbd->size = PKT_BUF_SZ; in init_rx_bufs()
564 cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ); in init_rx_bufs()
567 lp->rbd_head = lp->rbds; in init_rx_bufs()
568 rbd = lp->rbds + rx_ring_size - 1; in init_rx_bufs()
569 rbd->v_next = lp->rbds; in init_rx_bufs()
570 rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds)); in init_rx_bufs()
574 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) { in init_rx_bufs()
575 rfd->rbd = I596_NULL; in init_rx_bufs()
576 rfd->v_next = rfd+1; in init_rx_bufs()
577 rfd->v_prev = rfd-1; in init_rx_bufs()
578 rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1)); in init_rx_bufs()
579 rfd->cmd = CMD_FLEX; in init_rx_bufs()
581 lp->rfd_head = lp->rfds; in init_rx_bufs()
582 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds)); in init_rx_bufs()
583 rfd = lp->rfds; in init_rx_bufs()
584 rfd->rbd = lp->rbd_head; in init_rx_bufs()
585 rfd->v_prev = lp->rfds + rx_ring_size - 1; in init_rx_bufs()
586 rfd = lp->rfds + rx_ring_size - 1; in init_rx_bufs()
587 rfd->v_next = lp->rfds; in init_rx_bufs()
588 rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds)); in init_rx_bufs()
589 rfd->cmd = CMD_EOL|CMD_FLEX; in init_rx_bufs()
597 struct i596_private *lp = dev->ml_priv; in rebuild_rx_bufs()
603 lp->rfds[i].rbd = I596_NULL; in rebuild_rx_bufs()
604 lp->rfds[i].cmd = CMD_FLEX; in rebuild_rx_bufs()
606 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX; in rebuild_rx_bufs()
607 lp->rfd_head = lp->rfds; in rebuild_rx_bufs()
608 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds)); in rebuild_rx_bufs()
609 lp->rbd_head = lp->rbds; in rebuild_rx_bufs()
610 lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds)); in rebuild_rx_bufs()
616 struct i596_private *lp = dev->ml_priv; in init_i596_mem()
621 udelay(100); /* Wait 100us - seems to help */ in init_i596_mem()
632 * as we make appropriate use of non-cached pages for in init_i596_mem()
648 MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp)); in init_i596_mem()
652 lp->last_cmd = jiffies; in init_i596_mem()
656 lp->scp.sysbus = 0x00000054; in init_i596_mem()
660 lp->scp.sysbus = 0x0000004c; in init_i596_mem()
663 lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp)); in init_i596_mem()
664 lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb)); in init_i596_mem()
665 lp->iscp.stat = ISCP_BUSY; in init_i596_mem()
666 lp->cmd_backlog = 0; in init_i596_mem()
668 lp->cmd_head = lp->scb.cmd = I596_NULL; in init_i596_mem()
672 lp->scb.t_on = 7 * 25; in init_i596_mem()
673 lp->scb.t_off = 1 * 25; in init_i596_mem()
677 DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name)); in init_i596_mem()
683 DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name)); in init_i596_mem()
687 lp->scb.command = 0; in init_i596_mem()
707 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name)); in init_i596_mem()
708 memcpy(lp->cf_cmd.i596_config, init_setup, 14); in init_i596_mem()
709 lp->cf_cmd.cmd.command = CmdConfigure; in init_i596_mem()
710 i596_add_cmd(dev, &lp->cf_cmd.cmd); in init_i596_mem()
712 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); in init_i596_mem()
713 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN); in init_i596_mem()
714 lp->sa_cmd.cmd.command = CmdSASetup; in init_i596_mem()
715 i596_add_cmd(dev, &lp->sa_cmd.cmd); in init_i596_mem()
717 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name)); in init_i596_mem()
718 lp->tdr_cmd.cmd.command = CmdTDR; in init_i596_mem()
719 i596_add_cmd(dev, &lp->tdr_cmd.cmd); in init_i596_mem()
721 spin_lock_irqsave (&lp->lock, flags); in init_i596_mem()
724 spin_unlock_irqrestore (&lp->lock, flags); in init_i596_mem()
727 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name)); in init_i596_mem()
728 lp->scb.command = RX_START; in init_i596_mem()
731 spin_unlock_irqrestore (&lp->lock, flags); in init_i596_mem()
735 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name)); in init_i596_mem()
739 printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name); in init_i596_mem()
741 return -1; in init_i596_mem()
746 struct i596_private *lp = dev->ml_priv; in i596_rx()
752 lp->rfd_head, lp->rbd_head)); in i596_rx()
754 rfd = lp->rfd_head; /* Ref next frame to check */ in i596_rx()
756 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */ in i596_rx()
757 if (rfd->rbd == I596_NULL) in i596_rx()
759 else if (rfd->rbd == lp->rbd_head->b_addr) in i596_rx()
760 rbd = lp->rbd_head; in i596_rx()
762 printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name); in i596_rx()
767 rfd, rfd->rbd, rfd->stat)); in i596_rx()
769 if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) { in i596_rx()
771 int pkt_len = rbd->count & 0x3fff; in i596_rx()
772 struct sk_buff *skb = rbd->skb; in i596_rx()
775 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received")); in i596_rx()
794 rbd->skb = newskb; in i596_rx()
795 rbd->v_data = newskb->data; in i596_rx()
796 rbd->b_data = WSWAPchar(virt_to_bus(newskb->data)); in i596_rx()
798 cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ); in i596_rx()
806 dev->stats.rx_dropped++; in i596_rx()
811 skb_put_data(skb, rbd->v_data, in i596_rx()
814 skb->protocol=eth_type_trans(skb,dev); in i596_rx()
815 skb->len = pkt_len; in i596_rx()
817 cache_clear(virt_to_phys(rbd->skb->data), in i596_rx()
821 dev->stats.rx_packets++; in i596_rx()
822 dev->stats.rx_bytes+=pkt_len; in i596_rx()
827 dev->name, rfd->stat)); in i596_rx()
828 dev->stats.rx_errors++; in i596_rx()
829 if ((rfd->stat) & 0x0001) in i596_rx()
830 dev->stats.collisions++; in i596_rx()
831 if ((rfd->stat) & 0x0080) in i596_rx()
832 dev->stats.rx_length_errors++; in i596_rx()
833 if ((rfd->stat) & 0x0100) in i596_rx()
834 dev->stats.rx_over_errors++; in i596_rx()
835 if ((rfd->stat) & 0x0200) in i596_rx()
836 dev->stats.rx_fifo_errors++; in i596_rx()
837 if ((rfd->stat) & 0x0400) in i596_rx()
838 dev->stats.rx_frame_errors++; in i596_rx()
839 if ((rfd->stat) & 0x0800) in i596_rx()
840 dev->stats.rx_crc_errors++; in i596_rx()
841 if ((rfd->stat) & 0x1000) in i596_rx()
842 dev->stats.rx_length_errors++; in i596_rx()
847 if (rbd != I596_NULL && (rbd->count & 0x4000)) { in i596_rx()
848 rbd->count = 0; in i596_rx()
849 lp->rbd_head = rbd->v_next; in i596_rx()
854 rfd->rbd = I596_NULL; in i596_rx()
855 rfd->stat = 0; in i596_rx()
856 rfd->cmd = CMD_EOL|CMD_FLEX; in i596_rx()
857 rfd->count = 0; in i596_rx()
859 /* Remove end-of-list from old end descriptor */ in i596_rx()
861 rfd->v_prev->cmd = CMD_FLEX; in i596_rx()
865 lp->scb.rfd = rfd->b_next; in i596_rx()
866 lp->rfd_head = rfd->v_next; in i596_rx()
867 rfd = lp->rfd_head; in i596_rx()
880 while (lp->cmd_head != I596_NULL) { in i596_cleanup_cmd()
881 ptr = lp->cmd_head; in i596_cleanup_cmd()
882 lp->cmd_head = ptr->v_next; in i596_cleanup_cmd()
883 lp->cmd_backlog--; in i596_cleanup_cmd()
885 switch ((ptr->command) & 0x7) { in i596_cleanup_cmd()
889 struct sk_buff *skb = tx_cmd->skb; in i596_cleanup_cmd()
893 dev->stats.tx_errors++; in i596_cleanup_cmd()
894 dev->stats.tx_aborted_errors++; in i596_cleanup_cmd()
896 ptr->v_next = ptr->b_next = I596_NULL; in i596_cleanup_cmd()
897 tx_cmd->cmd.command = 0; /* Mark as free */ in i596_cleanup_cmd()
901 ptr->v_next = ptr->b_next = I596_NULL; in i596_cleanup_cmd()
906 lp->scb.cmd = I596_NULL; in i596_cleanup_cmd()
916 spin_lock_irqsave (&lp->lock, flags); in i596_reset()
922 lp->scb.command = CUC_ABORT | RX_ABORT; in i596_reset()
927 spin_unlock_irqrestore (&lp->lock, flags); in i596_reset()
936 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) in i596_add_cmd() argument
938 struct i596_private *lp = dev->ml_priv; in i596_add_cmd()
939 int ioaddr = dev->base_addr; in i596_add_cmd()
944 cmd->status = 0; in i596_add_cmd()
945 cmd->command |= (CMD_EOL | CMD_INTR); in i596_add_cmd()
946 cmd->v_next = cmd->b_next = I596_NULL; in i596_add_cmd()
948 spin_lock_irqsave (&lp->lock, flags); in i596_add_cmd()
950 if (lp->cmd_head != I596_NULL) { in i596_add_cmd()
951 lp->cmd_tail->v_next = cmd; in i596_add_cmd()
952 lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status)); in i596_add_cmd()
954 lp->cmd_head = cmd; in i596_add_cmd()
956 lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status)); in i596_add_cmd()
957 lp->scb.command = CUC_START; in i596_add_cmd()
960 lp->cmd_tail = cmd; in i596_add_cmd()
961 lp->cmd_backlog++; in i596_add_cmd()
963 spin_unlock_irqrestore (&lp->lock, flags); in i596_add_cmd()
965 if (lp->cmd_backlog > max_cmd_backlog) { in i596_add_cmd()
966 unsigned long tickssofar = jiffies - lp->last_cmd; in i596_add_cmd()
971 printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name); in i596_add_cmd()
981 DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq)); in i596_open()
983 if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) { in i596_open()
984 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); in i596_open()
985 return -EAGAIN; in i596_open()
990 res = -EAGAIN; in i596_open()
1002 res = -EAGAIN; in i596_open()
1016 free_irq(dev->irq, dev); in i596_open()
1023 struct i596_private *lp = dev->ml_priv; in i596_tx_timeout()
1024 int ioaddr = dev->base_addr; in i596_tx_timeout()
1028 dev->name)); in i596_tx_timeout()
1030 dev->stats.tx_errors++; in i596_tx_timeout()
1033 if (lp->last_restart == dev->stats.tx_packets) { in i596_tx_timeout()
1040 lp->scb.command = CUC_START | RX_START; in i596_tx_timeout()
1042 lp->last_restart = dev->stats.tx_packets; in i596_tx_timeout()
1051 struct i596_private *lp = dev->ml_priv; in i596_start_xmit()
1054 short length = skb->len; in i596_start_xmit()
1057 dev->name, skb->len, skb->data)); in i596_start_xmit()
1059 if (skb->len < ETH_ZLEN) { in i596_start_xmit()
1066 tx_cmd = lp->tx_cmds + lp->next_tx_cmd; in i596_start_xmit()
1067 tbd = lp->tbds + lp->next_tx_cmd; in i596_start_xmit()
1069 if (tx_cmd->cmd.command) { in i596_start_xmit()
1071 dev->name); in i596_start_xmit()
1072 dev->stats.tx_dropped++; in i596_start_xmit()
1076 if (++lp->next_tx_cmd == TX_RING_SIZE) in i596_start_xmit()
1077 lp->next_tx_cmd = 0; in i596_start_xmit()
1078 tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd)); in i596_start_xmit()
1079 tbd->next = I596_NULL; in i596_start_xmit()
1081 tx_cmd->cmd.command = CMD_FLEX | CmdTx; in i596_start_xmit()
1082 tx_cmd->skb = skb; in i596_start_xmit()
1084 tx_cmd->pad = 0; in i596_start_xmit()
1085 tx_cmd->size = 0; in i596_start_xmit()
1086 tbd->pad = 0; in i596_start_xmit()
1087 tbd->size = EOF | length; in i596_start_xmit()
1089 tbd->data = WSWAPchar(virt_to_bus(skb->data)); in i596_start_xmit()
1092 cache_push(virt_to_phys(skb->data), length); in i596_start_xmit()
1094 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued")); in i596_start_xmit()
1095 i596_add_cmd(dev, &tx_cmd->cmd); in i596_start_xmit()
1097 dev->stats.tx_packets++; in i596_start_xmit()
1098 dev->stats.tx_bytes += length; in i596_start_xmit()
1108 printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n", in print_eth()
1132 return ERR_PTR(-ENODEV); in i82596_probe()
1137 return ERR_PTR(-ENOMEM); in i82596_probe()
1142 printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n"); in i82596_probe()
1143 err = -ENODEV; in i82596_probe()
1147 dev->base_addr = MVME_I596_BASE; in i82596_probe()
1148 dev->irq = (unsigned) MVME16x_IRQ_I596; in i82596_probe()
1162 dev->base_addr = BVME_I596_BASE; in i82596_probe()
1163 dev->irq = (unsigned) BVME_IRQ_I596; in i82596_probe()
1167 err = -ENODEV; in i82596_probe()
1171 dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0); in i82596_probe()
1172 if (!dev->mem_start) { in i82596_probe()
1173 err = -ENOMEM; in i82596_probe()
1177 DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr)); in i82596_probe()
1183 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq)); in i82596_probe()
1187 /* The 82596-specific entries in the device structure. */ in i82596_probe()
1188 dev->netdev_ops = &i596_netdev_ops; in i82596_probe()
1189 dev->watchdog_timeo = TX_TIMEOUT; in i82596_probe()
1191 dev->ml_priv = (void *)(dev->mem_start); in i82596_probe()
1193 lp = dev->ml_priv; in i82596_probe()
1195 "lp->scb at 0x%08lx\n", in i82596_probe()
1196 dev->name, (unsigned long)lp, in i82596_probe()
1197 sizeof(struct i596_private), (unsigned long)&lp->scb)); in i82596_probe()
1201 cache_push(virt_to_phys((void *)(dev->mem_start)), 4096); in i82596_probe()
1202 cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096); in i82596_probe()
1203 kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER); in i82596_probe()
1205 lp->scb.command = 0; in i82596_probe()
1206 lp->scb.cmd = I596_NULL; in i82596_probe()
1207 lp->scb.rfd = I596_NULL; in i82596_probe()
1208 spin_lock_init(&lp->lock); in i82596_probe()
1219 kernel_set_cachemode((void *)(dev->mem_start), 4096, in i82596_probe()
1222 free_page ((u32)(dev->mem_start)); in i82596_probe()
1250 ioaddr = dev->base_addr; in i596_interrupt()
1251 lp = dev->ml_priv; in i596_interrupt()
1253 spin_lock (&lp->lock); in i596_interrupt()
1256 status = lp->scb.status; in i596_interrupt()
1259 dev->name, irq, status)); in i596_interrupt()
1268 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name)); in i596_interrupt()
1270 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700)); in i596_interrupt()
1272 while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) { in i596_interrupt()
1273 ptr = lp->cmd_head; in i596_interrupt()
1275 DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n", in i596_interrupt()
1276 lp->cmd_head->status, lp->cmd_head->command)); in i596_interrupt()
1277 lp->cmd_head = ptr->v_next; in i596_interrupt()
1278 lp->cmd_backlog--; in i596_interrupt()
1280 switch ((ptr->command) & 0x7) { in i596_interrupt()
1284 struct sk_buff *skb = tx_cmd->skb; in i596_interrupt()
1286 if ((ptr->status) & STAT_OK) { in i596_interrupt()
1287 DEB(DEB_TXADDR,print_eth(skb->data, "tx-done")); in i596_interrupt()
1289 dev->stats.tx_errors++; in i596_interrupt()
1290 if ((ptr->status) & 0x0020) in i596_interrupt()
1291 dev->stats.collisions++; in i596_interrupt()
1292 if (!((ptr->status) & 0x0040)) in i596_interrupt()
1293 dev->stats.tx_heartbeat_errors++; in i596_interrupt()
1294 if ((ptr->status) & 0x0400) in i596_interrupt()
1295 dev->stats.tx_carrier_errors++; in i596_interrupt()
1296 if ((ptr->status) & 0x0800) in i596_interrupt()
1297 dev->stats.collisions++; in i596_interrupt()
1298 if ((ptr->status) & 0x1000) in i596_interrupt()
1299 dev->stats.tx_aborted_errors++; in i596_interrupt()
1304 tx_cmd->cmd.command = 0; /* Mark free */ in i596_interrupt()
1309 unsigned short status = ((struct tdr_cmd *)ptr)->status; in i596_interrupt()
1312 DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name)); in i596_interrupt()
1315 printk(KERN_ERR "%s: Transceiver problem.\n", dev->name); in i596_interrupt()
1317 printk(KERN_ERR "%s: Termination problem.\n", dev->name); in i596_interrupt()
1319 printk(KERN_ERR "%s: Short circuit.\n", dev->name); in i596_interrupt()
1321 DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff)); in i596_interrupt()
1328 ptr->command = 0; in i596_interrupt()
1331 ptr->v_next = ptr->b_next = I596_NULL; in i596_interrupt()
1332 lp->last_cmd = jiffies; in i596_interrupt()
1335 ptr = lp->cmd_head; in i596_interrupt()
1336 while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) { in i596_interrupt()
1337 ptr->command &= 0x1fff; in i596_interrupt()
1338 ptr = ptr->v_next; in i596_interrupt()
1341 if ((lp->cmd_head != I596_NULL)) in i596_interrupt()
1343 lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status)); in i596_interrupt()
1347 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name)); in i596_interrupt()
1349 /* Only RX_START if stopped - RGH 07-07-96 */ in i596_interrupt()
1352 DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status)); in i596_interrupt()
1354 dev->stats.rx_errors++; in i596_interrupt()
1355 dev->stats.rx_fifo_errors++; in i596_interrupt()
1361 lp->scb.command = ack_cmd; in i596_interrupt()
1382 DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); in i596_interrupt()
1384 spin_unlock (&lp->lock); in i596_interrupt()
1390 struct i596_private *lp = dev->ml_priv; in i596_close()
1396 dev->name, lp->scb.status)); in i596_close()
1398 spin_lock_irqsave(&lp->lock, flags); in i596_close()
1401 lp->scb.command = CUC_ABORT | RX_ABORT; in i596_close()
1406 spin_unlock_irqrestore(&lp->lock, flags); in i596_close()
1431 free_irq(dev->irq, dev); in i596_close()
1443 struct i596_private *lp = dev->ml_priv; in set_multicast_list()
1447 dev->name, netdev_mc_count(dev), in set_multicast_list()
1448 dev->flags & IFF_PROMISC ? "ON" : "OFF", in set_multicast_list()
1449 dev->flags & IFF_ALLMULTI ? "ON" : "OFF")); in set_multicast_list()
1451 if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out")) in set_multicast_list()
1454 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) { in set_multicast_list()
1455 lp->cf_cmd.i596_config[8] |= 0x01; in set_multicast_list()
1458 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) { in set_multicast_list()
1459 lp->cf_cmd.i596_config[8] &= ~0x01; in set_multicast_list()
1462 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) { in set_multicast_list()
1463 lp->cf_cmd.i596_config[11] &= ~0x20; in set_multicast_list()
1466 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) { in set_multicast_list()
1467 lp->cf_cmd.i596_config[11] |= 0x20; in set_multicast_list()
1471 lp->cf_cmd.cmd.command = CmdConfigure; in set_multicast_list()
1472 i596_add_cmd(dev, &lp->cf_cmd.cmd); in set_multicast_list()
1480 dev->name, cnt); in set_multicast_list()
1486 struct mc_cmd *cmd; in set_multicast_list() local
1488 if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out")) in set_multicast_list()
1490 cmd = &lp->mc_cmd; in set_multicast_list()
1491 cmd->cmd.command = CmdMulticastList; in set_multicast_list()
1492 cmd->mc_cnt = cnt * ETH_ALEN; in set_multicast_list()
1493 cp = cmd->mc_addrs; in set_multicast_list()
1495 if (!cnt--) in set_multicast_list()
1497 memcpy(cp, ha->addr, ETH_ALEN); in set_multicast_list()
1500 dev->name, cp)); in set_multicast_list()
1503 i596_add_cmd(dev, &cmd->cmd); in set_multicast_list()
1509 static int debug = -1;
1530 kernel_set_cachemode((void *)(dev_82596->mem_start), 4096, in i82596_cleanup()
1533 free_page ((u32)(dev_82596->mem_start)); in i82596_cleanup()