Lines Matching +full:sb +full:- +full:tsi

1 // SPDX-License-Identifier: GPL-2.0-only
8 * It was taken from the frle-0.22 device driver.
11 * frle-0.22 device driver.
26 * 1 - Per card interrupt spinlock (to protect structures and such)
27 * 2 - Per SCQ scq spinlock
28 * 3 - Per card resource spinlock (to access registers, etc.)
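
Read top to bottom, the list above is a nesting rule: a lock with a lower number is taken before, and held around, a lock with a higher number. A minimal kernel-style sketch of that ordering, using hypothetical demo_* types rather than the driver's ns_dev/scq_info:

#include <linux/spinlock.h>

/* Illustrative only: the nesting order documented above, not driver code. */
struct demo_card {			/* stands in for ns_dev */
	spinlock_t int_lock;		/* 1 - per-card interrupt lock */
	spinlock_t res_lock;		/* 3 - per-card resource lock  */
};

struct demo_scq {			/* stands in for scq_info */
	spinlock_t lock;		/* 2 - per-SCQ lock */
};

static void demo_lock_order(struct demo_card *card, struct demo_scq *scq)
{
	unsigned long flags;

	spin_lock_irqsave(&card->int_lock, flags);	/* level 1, taken first */
	spin_lock(&scq->lock);				/* level 2, nests inside 1 */
	spin_lock(&card->res_lock);			/* level 3, taken last */

	/* ... touch queue state and registers here ... */

	spin_unlock(&card->res_lock);
	spin_unlock(&scq->lock);
	spin_unlock_irqrestore(&card->int_lock, flags);
}
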
42 #include <linux/dma-mapping.h>
105 #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
109 #define PTR_DIFF(a, b) ((u32)((unsigned long)(a) - (unsigned long)(b)))
112 #define ATM_SKB(s) (&(s)->atm)
116 (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
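
The scq_virt_to_bus() body shown above is plain offset arithmetic: dma_alloc_coherent() hands back both a kernel virtual base (org) and the bus address (dma) of the same buffer, so an SCQ entry's bus address is the DMA base plus the entry's byte offset from org. A self-contained sketch of the same translation (the demo_* names are hypothetical):

#include <linux/types.h>

/* Illustrative only: the translation performed by scq_virt_to_bus(). */
struct demo_scq {
	void	  *org;	/* kernel virtual address from dma_alloc_coherent() */
	dma_addr_t dma;	/* bus/DMA address of the same coherent buffer      */
};

/* Bus address of the SCQ entry that lives at kernel virtual address p. */
static dma_addr_t demo_virt_to_bus(const struct demo_scq *scq, const void *p)
{
	return scq->dma + ((unsigned long)p - (unsigned long)scq->org);
}
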
143 static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb);
182 static int index = -1; in nicstar_init_one()
190 cards[index--] = NULL; /* don't increment index */ in nicstar_init_one()
196 return -ENODEV; in nicstar_init_one()
206 struct sk_buff *sb; in nicstar_remove_one() local
208 i = card->index; in nicstar_remove_one()
213 if (card->atmdev->phy && card->atmdev->phy->stop) in nicstar_remove_one()
214 card->atmdev->phy->stop(card->atmdev); in nicstar_remove_one()
217 writel(0x00000000, card->membase + CFG); in nicstar_remove_one()
219 /* De-register device */ in nicstar_remove_one()
220 atm_dev_deregister(card->atmdev); in nicstar_remove_one()
227 PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); in nicstar_remove_one()
228 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) { in nicstar_remove_one()
235 card->iovpool.count); in nicstar_remove_one()
236 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) { in nicstar_remove_one()
241 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) in nicstar_remove_one()
243 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) in nicstar_remove_one()
244 dev_kfree_skb_any(sb); in nicstar_remove_one()
245 free_scq(card, card->scq0, NULL); in nicstar_remove_one()
247 if (card->scd2vc[j] != NULL) in nicstar_remove_one()
248 free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); in nicstar_remove_one()
250 idr_destroy(&card->idr); in nicstar_remove_one()
251 dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, in nicstar_remove_one()
252 card->rsq.org, card->rsq.dma); in nicstar_remove_one()
253 dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, in nicstar_remove_one()
254 card->tsq.org, card->tsq.dma); in nicstar_remove_one()
255 free_irq(card->pcidev->irq, card); in nicstar_remove_one()
256 iounmap(card->membase); in nicstar_remove_one()
317 spin_lock_irqsave(&card->res_lock, flags); in ns_read_sram()
319 writel(sram_address, card->membase + CMD); in ns_read_sram()
321 data = readl(card->membase + DR0); in ns_read_sram()
322 spin_unlock_irqrestore(&card->res_lock, flags); in ns_read_sram()
331 count--; /* count range now is 0..3 instead of 1..4 */ in ns_write_sram()
334 spin_lock_irqsave(&card->res_lock, flags); in ns_write_sram()
337 writel(*(value++), card->membase + i); in ns_write_sram()
339 so card->membase + DR0 == card->membase */ in ns_write_sram()
343 writel(sram_address, card->membase + CMD); in ns_write_sram()
344 spin_unlock_irqrestore(&card->res_lock, flags); in ns_write_sram()
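
ns_read_sram() and ns_write_sram() above share one access pattern: serialize on the per-card resource lock, load DR0..DR3 (the first four dwords of the register window, hence the bare card->membase + i in the write loop), then start the operation by writing the command word to CMD. An illustrative sketch of that sequence, assuming the driver's ns_dev type and the CMD_BUSY()/DR0/CMD definitions from nicstar.h; demo_issue_cmd() is a hypothetical helper, and the encoding of the SRAM address and word count into the command word, done by the real helpers, is not reproduced here:

#include <linux/io.h>
#include <linux/spinlock.h>
#include "nicstar.h"		/* ns_dev, CMD, DR0, CMD_BUSY() */

/* Illustrative only: the load-DRs-then-kick-CMD sequence used above.
 * ndr is at most 4, matching the DR0..DR3 data registers. */
static void demo_issue_cmd(ns_dev *card, u32 cmd_word, const u32 *dr, int ndr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&card->res_lock, flags);
	while (CMD_BUSY(card))			/* wait for the previous command */
		;
	for (i = 0; i < ndr; i++)		/* DR0..DR3 are consecutive dwords */
		writel(dr[i], card->membase + DR0 + i * 4);
	writel(cmd_word, card->membase + CMD);	/* start the operation */
	spin_unlock_irqrestore(&card->res_lock, flags);
}
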
367 if (dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)) != 0) { in ns_init_card()
385 spin_lock_init(&card->int_lock); in ns_init_card()
386 spin_lock_init(&card->res_lock); in ns_init_card()
390 card->index = i; in ns_init_card()
391 card->atmdev = NULL; in ns_init_card()
392 card->pcidev = pcidev; in ns_init_card()
394 card->membase = ioremap(membase, NS_IOREMAP_SIZE); in ns_init_card()
395 if (!card->membase) { in ns_init_card()
401 PRINTK("nicstar%d: membase at 0x%p.\n", i, card->membase); in ns_init_card()
432 data = readl(card->membase + STAT); in ns_init_card()
434 writel(NS_STAT_TMROF, card->membase + STAT); in ns_init_card()
437 writel(NS_CFG_SWRST, card->membase + CFG); in ns_init_card()
439 writel(0x00000000, card->membase + CFG); in ns_init_card()
442 writel(0x00000008, card->membase + GP); in ns_init_card()
444 writel(0x00000001, card->membase + GP); in ns_init_card()
447 writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ in ns_init_card()
452 writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); in ns_init_card()
454 data = readl(card->membase + DR0); in ns_init_card()
458 card->max_pcr = ATM_25_PCR; in ns_init_card()
460 writel(0x00000008, card->membase + DR0); in ns_init_card()
461 writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); in ns_init_card()
463 writel(NS_STAT_SFBQF, card->membase + STAT); in ns_init_card()
466 writel(0x00000022, card->membase + DR0); in ns_init_card()
467 writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); in ns_init_card()
473 card->max_pcr = ATM_OC3_PCR; in ns_init_card()
476 writel(0x00000002, card->membase + DR0); in ns_init_card()
477 writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); in ns_init_card()
486 writel(0x00000000, card->membase + GP); in ns_init_card()
495 card->sram_size = 128; in ns_init_card()
497 card->sram_size = 32; in ns_init_card()
498 PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); in ns_init_card()
500 card->rct_size = NS_MAX_RCTSIZE; in ns_init_card()
503 if (card->sram_size == 128) in ns_init_card()
508 if (card->sram_size == 32) { in ns_init_card()
512 card->rct_size = 4096; in ns_init_card()
518 card->vpibits = NS_VPIBITS; in ns_init_card()
519 if (card->rct_size == 4096) in ns_init_card()
520 card->vcibits = 12 - NS_VPIBITS; in ns_init_card()
521 else /* card->rct_size == 16384 */ in ns_init_card()
522 card->vcibits = 14 - NS_VPIBITS; in ns_init_card()
526 nicstar_init_eprom(card->membase); in ns_init_card()
529 writel(0x00000000, card->membase + VPM); in ns_init_card()
531 card->intcnt = 0; in ns_init_card()
533 (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) { in ns_init_card()
534 pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); in ns_init_card()
541 card->tsq.org = dma_alloc_coherent(&card->pcidev->dev, in ns_init_card()
543 &card->tsq.dma, GFP_KERNEL); in ns_init_card()
544 if (card->tsq.org == NULL) { in ns_init_card()
550 card->tsq.base = PTR_ALIGN(card->tsq.org, NS_TSQ_ALIGNMENT); in ns_init_card()
551 card->tsq.next = card->tsq.base; in ns_init_card()
552 card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); in ns_init_card()
554 ns_tsi_init(card->tsq.base + j); in ns_init_card()
555 writel(0x00000000, card->membase + TSQH); in ns_init_card()
556 writel(ALIGN(card->tsq.dma, NS_TSQ_ALIGNMENT), card->membase + TSQB); in ns_init_card()
557 PRINTK("nicstar%d: TSQ base at 0x%p.\n", i, card->tsq.base); in ns_init_card()
560 card->rsq.org = dma_alloc_coherent(&card->pcidev->dev, in ns_init_card()
562 &card->rsq.dma, GFP_KERNEL); in ns_init_card()
563 if (card->rsq.org == NULL) { in ns_init_card()
569 card->rsq.base = PTR_ALIGN(card->rsq.org, NS_RSQ_ALIGNMENT); in ns_init_card()
570 card->rsq.next = card->rsq.base; in ns_init_card()
571 card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); in ns_init_card()
573 ns_rsqe_init(card->rsq.base + j); in ns_init_card()
574 writel(0x00000000, card->membase + RSQH); in ns_init_card()
575 writel(ALIGN(card->rsq.dma, NS_RSQ_ALIGNMENT), card->membase + RSQB); in ns_init_card()
576 PRINTK("nicstar%d: RSQ base at 0x%p.\n", i, card->rsq.base); in ns_init_card()
579 card->scq1 = NULL; in ns_init_card()
580 card->scq2 = NULL; in ns_init_card()
581 card->scq0 = get_scq(card, VBR_SCQSIZE, NS_VRSCD0); in ns_init_card()
582 if (card->scq0 == NULL) { in ns_init_card()
588 u32d[0] = scq_virt_to_bus(card->scq0, card->scq0->base); in ns_init_card()
595 card->scq0->scd = NS_VRSCD0; in ns_init_card()
596 PRINTK("nicstar%d: VBR-SCQ0 base at 0x%p.\n", i, card->scq0->base); in ns_init_card()
599 card->tst_addr = NS_TST0; in ns_init_card()
600 card->tst_free_entries = NS_TST_NUM_ENTRIES; in ns_init_card()
611 card->tste2vc[j] = NULL; in ns_init_card()
612 writel(NS_TST0 << 2, card->membase + TSTB); in ns_init_card()
623 for (j = 0; j < card->rct_size; j++) in ns_init_card()
626 memset(card->vcmap, 0, sizeof(card->vcmap)); in ns_init_card()
629 card->scd2vc[j] = NULL; in ns_init_card()
632 card->sbnr.min = MIN_SB; in ns_init_card()
633 card->sbnr.init = NUM_SB; in ns_init_card()
634 card->sbnr.max = MAX_SB; in ns_init_card()
635 card->lbnr.min = MIN_LB; in ns_init_card()
636 card->lbnr.init = NUM_LB; in ns_init_card()
637 card->lbnr.max = MAX_LB; in ns_init_card()
638 card->iovnr.min = MIN_IOVB; in ns_init_card()
639 card->iovnr.init = NUM_IOVB; in ns_init_card()
640 card->iovnr.max = MAX_IOVB; in ns_init_card()
641 card->hbnr.min = MIN_HB; in ns_init_card()
642 card->hbnr.init = NUM_HB; in ns_init_card()
643 card->hbnr.max = MAX_HB; in ns_init_card()
645 card->sm_handle = NULL; in ns_init_card()
646 card->sm_addr = 0x00000000; in ns_init_card()
647 card->lg_handle = NULL; in ns_init_card()
648 card->lg_addr = 0x00000000; in ns_init_card()
650 card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ in ns_init_card()
652 idr_init(&card->idr); in ns_init_card()
654 /* Pre-allocate some huge buffers */ in ns_init_card()
655 skb_queue_head_init(&card->hbpool.queue); in ns_init_card()
656 card->hbpool.count = 0; in ns_init_card()
669 skb_queue_tail(&card->hbpool.queue, hb); in ns_init_card()
670 card->hbpool.count++; in ns_init_card()
674 skb_queue_head_init(&card->lbpool.queue); in ns_init_card()
675 card->lbpool.count = 0; /* Not used */ in ns_init_card()
688 skb_queue_tail(&card->lbpool.queue, lb); in ns_init_card()
693 card->rcbuf = lb; in ns_init_card()
694 card->rawcell = (struct ns_rcqe *) lb->data; in ns_init_card()
695 card->rawch = NS_PRV_DMA(lb); in ns_init_card()
700 ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) { in ns_init_card()
710 skb_queue_head_init(&card->sbpool.queue); in ns_init_card()
711 card->sbpool.count = 0; /* Not used */ in ns_init_card()
713 struct sk_buff *sb; in ns_init_card() local
714 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); in ns_init_card()
715 if (sb == NULL) { in ns_init_card()
723 NS_PRV_BUFTYPE(sb) = BUF_SM; in ns_init_card()
724 skb_queue_tail(&card->sbpool.queue, sb); in ns_init_card()
725 skb_reserve(sb, NS_AAL0_HEADER); in ns_init_card()
726 push_rxbufs(card, sb); in ns_init_card()
730 ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) { in ns_init_card()
740 skb_queue_head_init(&card->iovpool.queue); in ns_init_card()
741 card->iovpool.count = 0; in ns_init_card()
754 skb_queue_tail(&card->iovpool.queue, iovb); in ns_init_card()
755 card->iovpool.count++; in ns_init_card()
759 if (card->rct_size == 4096) in ns_init_card()
761 else /* (card->rct_size == 16384) */ in ns_init_card()
764 card->efbie = 1; in ns_init_card()
767 card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, in ns_init_card()
768 -1, NULL); in ns_init_card()
769 if (card->atmdev == NULL) { in ns_init_card()
776 if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) { in ns_init_card()
777 nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, in ns_init_card()
778 card->atmdev->esi, 6); in ns_init_card()
779 if (ether_addr_equal(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00")) { in ns_init_card()
780 nicstar_read_eprom(card->membase, in ns_init_card()
782 card->atmdev->esi, 6); in ns_init_card()
786 printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); in ns_init_card()
788 card->atmdev->dev_data = card; in ns_init_card()
789 card->atmdev->ci_range.vpi_bits = card->vpibits; in ns_init_card()
790 card->atmdev->ci_range.vci_bits = card->vcibits; in ns_init_card()
791 card->atmdev->link_rate = card->max_pcr; in ns_init_card()
792 card->atmdev->phy = NULL; in ns_init_card()
795 if (card->max_pcr == ATM_OC3_PCR) in ns_init_card()
796 suni_init(card->atmdev); in ns_init_card()
800 if (card->max_pcr == ATM_25_PCR) in ns_init_card()
801 idt77105_init(card->atmdev); in ns_init_card()
804 if (card->atmdev->phy && card->atmdev->phy->start) in ns_init_card()
805 card->atmdev->phy->start(card->atmdev); in ns_init_card()
809 NS_CFG_PHYIE, card->membase + CFG); in ns_init_card()
819 writel(0x00000000, card->membase + CFG); in ns_init_card_error()
823 while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) in ns_init_card_error()
827 struct sk_buff *sb; in ns_init_card_error() local
828 while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) in ns_init_card_error()
829 dev_kfree_skb_any(sb); in ns_init_card_error()
830 free_scq(card, card->scq0, NULL); in ns_init_card_error()
834 while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) in ns_init_card_error()
839 while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) in ns_init_card_error()
843 dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT, in ns_init_card_error()
844 card->rsq.org, card->rsq.dma); in ns_init_card_error()
847 dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT, in ns_init_card_error()
848 card->tsq.org, card->tsq.dma); in ns_init_card_error()
851 free_irq(card->pcidev->irq, card); in ns_init_card_error()
854 iounmap(card->membase); in ns_init_card_error()
857 pci_disable_device(card->pcidev); in ns_init_card_error()
872 scq->org = dma_alloc_coherent(&card->pcidev->dev, in get_scq()
873 2 * size, &scq->dma, GFP_KERNEL); in get_scq()
874 if (!scq->org) { in get_scq()
878 scq->skb = kcalloc(size / NS_SCQE_SIZE, sizeof(*scq->skb), in get_scq()
880 if (!scq->skb) { in get_scq()
881 dma_free_coherent(&card->pcidev->dev, in get_scq()
882 2 * size, scq->org, scq->dma); in get_scq()
886 scq->num_entries = size / NS_SCQE_SIZE; in get_scq()
887 scq->base = PTR_ALIGN(scq->org, size); in get_scq()
888 scq->next = scq->base; in get_scq()
889 scq->last = scq->base + (scq->num_entries - 1); in get_scq()
890 scq->tail = scq->last; in get_scq()
891 scq->scd = scd; in get_scq()
892 scq->tbd_count = 0; in get_scq()
893 init_waitqueue_head(&scq->scqfull_waitq); in get_scq()
894 scq->full = 0; in get_scq()
895 spin_lock_init(&scq->lock); in get_scq()
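
Note how get_scq() above asks dma_alloc_coherent() for twice the queue size and then takes PTR_ALIGN(scq->org, size) as the base: a 2*size-byte buffer always contains a size-aligned span of size bytes, so the aligned circular queue fits regardless of where the allocation landed. A minimal illustration of that allocation pattern, with a hypothetical demo_alloc_aligned() helper (size assumed to be a power of two):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>	/* PTR_ALIGN() */

/* Illustrative only: carve a 'size'-aligned region out of a 2 * size
 * coherent allocation.  The matching bus address of the returned base is
 * ALIGN(*dma, size), as in the TSQB/RSQB writes earlier in ns_init_card(). */
static void *demo_alloc_aligned(struct device *dev, size_t size,
				void **org, dma_addr_t *dma)
{
	*org = dma_alloc_coherent(dev, 2 * size, dma, GFP_KERNEL);
	if (!*org)
		return NULL;
	return PTR_ALIGN(*org, size);	/* always fits within the 2*size span */
}
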
905 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) in free_scq()
906 for (i = 0; i < scq->num_entries; i++) { in free_scq()
907 if (scq->skb[i] != NULL) { in free_scq()
908 vcc = ATM_SKB(scq->skb[i])->vcc; in free_scq()
909 if (vcc->pop != NULL) in free_scq()
910 vcc->pop(vcc, scq->skb[i]); in free_scq()
912 dev_kfree_skb_any(scq->skb[i]); in free_scq()
919 for (i = 0; i < scq->num_entries; i++) in free_scq()
920 dev_kfree_skb_any(scq->skb[i]); in free_scq()
922 for (i = 0; i < scq->num_entries; i++) { in free_scq()
923 if (scq->skb[i] != NULL) { in free_scq()
924 if (vcc->pop != NULL) in free_scq()
925 vcc->pop(vcc, scq->skb[i]); in free_scq()
927 dev_kfree_skb_any(scq->skb[i]); in free_scq()
931 kfree(scq->skb); in free_scq()
932 dma_free_coherent(&card->pcidev->dev, in free_scq()
933 2 * (scq->num_entries == VBR_SCQ_NUM_ENTRIES ? in free_scq()
935 scq->org, scq->dma); in free_scq()
953 addr1 = dma_map_single(&card->pcidev->dev, in push_rxbufs()
954 skb->data, in push_rxbufs()
963 card->index); in push_rxbufs()
966 stat = readl(card->membase + STAT); in push_rxbufs()
967 card->sbfqc = ns_stat_sfbqc_get(stat); in push_rxbufs()
968 card->lbfqc = ns_stat_lfbqc_get(stat); in push_rxbufs()
971 if (card->sm_addr) { in push_rxbufs()
972 addr2 = card->sm_addr; in push_rxbufs()
973 handle2 = card->sm_handle; in push_rxbufs()
974 card->sm_addr = 0x00000000; in push_rxbufs()
975 card->sm_handle = NULL; in push_rxbufs()
978 card->sm_addr = addr1; in push_rxbufs()
979 card->sm_handle = handle1; in push_rxbufs()
985 if (card->lg_addr) { in push_rxbufs()
986 addr2 = card->lg_addr; in push_rxbufs()
987 handle2 = card->lg_handle; in push_rxbufs()
988 card->lg_addr = 0x00000000; in push_rxbufs()
989 card->lg_handle = NULL; in push_rxbufs()
992 card->lg_addr = addr1; in push_rxbufs()
993 card->lg_handle = handle1; in push_rxbufs()
1000 if (card->sbfqc >= card->sbnr.max) { in push_rxbufs()
1001 skb_unlink(handle1, &card->sbpool.queue); in push_rxbufs()
1003 skb_unlink(handle2, &card->sbpool.queue); in push_rxbufs()
1007 card->sbfqc += 2; in push_rxbufs()
1010 if (card->lbfqc >= card->lbnr.max) { in push_rxbufs()
1011 skb_unlink(handle1, &card->lbpool.queue); in push_rxbufs()
1013 skb_unlink(handle2, &card->lbpool.queue); in push_rxbufs()
1017 card->lbfqc += 2; in push_rxbufs()
1020 id1 = idr_alloc(&card->idr, handle1, 0, 0, GFP_ATOMIC); in push_rxbufs()
1024 id2 = idr_alloc(&card->idr, handle2, 0, 0, GFP_ATOMIC); in push_rxbufs()
1028 spin_lock_irqsave(&card->res_lock, flags); in push_rxbufs()
1030 writel(addr2, card->membase + DR3); in push_rxbufs()
1031 writel(id2, card->membase + DR2); in push_rxbufs()
1032 writel(addr1, card->membase + DR1); in push_rxbufs()
1033 writel(id1, card->membase + DR0); in push_rxbufs()
1035 card->membase + CMD); in push_rxbufs()
1036 spin_unlock_irqrestore(&card->res_lock, flags); in push_rxbufs()
1039 card->index, in push_rxbufs()
1044 if (!card->efbie && card->sbfqc >= card->sbnr.min && in push_rxbufs()
1045 card->lbfqc >= card->lbnr.min) { in push_rxbufs()
1046 card->efbie = 1; in push_rxbufs()
1047 writel((readl(card->membase + CFG) | NS_CFG_EFBIE), in push_rxbufs()
1048 card->membase + CFG); in push_rxbufs()
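
push_rxbufs() above feeds free receive buffers to the card two at a time: a lone small (or large) buffer is parked in card->sm_addr/sm_handle (or lg_addr/lg_handle) until a partner shows up, and only then are both written to DR0..DR3 and submitted with a single command. The pairing idea in isolation, as a stripped-down sketch with hypothetical demo_* names:

#include <linux/types.h>

/* Illustrative only: submit free buffers to hardware two at a time. */
struct demo_pair_stash {
	void	  *handle;	/* parked buffer waiting for a partner */
	dma_addr_t addr;	/* 0 means the stash is empty */
};

static void demo_push_pairwise(struct demo_pair_stash *stash,
			       void *handle, dma_addr_t addr,
			       void (*submit)(void *h1, dma_addr_t a1,
					      void *h2, dma_addr_t a2))
{
	if (!stash->addr) {		/* first of a pair: park it */
		stash->handle = handle;
		stash->addr = addr;
		return;
	}
	submit(handle, addr, stash->handle, stash->addr); /* one command, two buffers */
	stash->handle = NULL;
	stash->addr = 0;
}
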
1063 dev = card->atmdev; in ns_irq_handler()
1064 card->intcnt++; in ns_irq_handler()
1066 PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); in ns_irq_handler()
1068 spin_lock_irqsave(&card->int_lock, flags); in ns_irq_handler()
1070 stat_r = readl(card->membase + STAT); in ns_irq_handler()
1074 TXPRINTK("nicstar%d: TSI interrupt\n", card->index); in ns_irq_handler()
1076 writel(NS_STAT_TSIF, card->membase + STAT); in ns_irq_handler()
1079 /* Incomplete CS-PDU has been transmitted */ in ns_irq_handler()
1081 writel(NS_STAT_TXICP, card->membase + STAT); in ns_irq_handler()
1082 TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", in ns_irq_handler()
1083 card->index); in ns_irq_handler()
1088 writel(NS_STAT_TSQF, card->membase + STAT); in ns_irq_handler()
1089 PRINTK("nicstar%d: TSQ full.\n", card->index); in ns_irq_handler()
1095 writel(NS_STAT_TMROF, card->membase + STAT); in ns_irq_handler()
1096 PRINTK("nicstar%d: Timer overflow.\n", card->index); in ns_irq_handler()
1101 writel(NS_STAT_PHYI, card->membase + STAT); in ns_irq_handler()
1102 PRINTK("nicstar%d: PHY interrupt.\n", card->index); in ns_irq_handler()
1103 if (dev->phy && dev->phy->interrupt) { in ns_irq_handler()
1104 dev->phy->interrupt(dev); in ns_irq_handler()
1110 writel(NS_STAT_SFBQF, card->membase + STAT); in ns_irq_handler()
1112 card->index); in ns_irq_handler()
1117 writel(NS_STAT_LFBQF, card->membase + STAT); in ns_irq_handler()
1119 card->index); in ns_irq_handler()
1124 writel(NS_STAT_RSQF, card->membase + STAT); in ns_irq_handler()
1125 printk("nicstar%d: RSQ full.\n", card->index); in ns_irq_handler()
1129 /* Complete CS-PDU received */ in ns_irq_handler()
1131 RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); in ns_irq_handler()
1133 writel(NS_STAT_EOPDU, card->membase + STAT); in ns_irq_handler()
1138 writel(NS_STAT_RAWCF, card->membase + STAT); in ns_irq_handler()
1141 card->index); in ns_irq_handler()
1146 while (readl(card->membase + RAWCT) != card->rawch) { in ns_irq_handler()
1148 if (ns_rcqe_islast(card->rawcell)) { in ns_irq_handler()
1151 oldbuf = card->rcbuf; in ns_irq_handler()
1152 card->rcbuf = idr_find(&card->idr, in ns_irq_handler()
1153 ns_rcqe_nextbufhandle(card->rawcell)); in ns_irq_handler()
1154 card->rawch = NS_PRV_DMA(card->rcbuf); in ns_irq_handler()
1155 card->rawcell = (struct ns_rcqe *) in ns_irq_handler()
1156 card->rcbuf->data; in ns_irq_handler()
1159 card->rawch += NS_RCQE_SIZE; in ns_irq_handler()
1160 card->rawcell++; in ns_irq_handler()
1168 struct sk_buff *sb; in ns_irq_handler() local
1170 writel(NS_STAT_SFBQE, card->membase + STAT); in ns_irq_handler()
1172 card->index); in ns_irq_handler()
1173 for (i = 0; i < card->sbnr.min; i++) { in ns_irq_handler()
1174 sb = dev_alloc_skb(NS_SMSKBSIZE); in ns_irq_handler()
1175 if (sb == NULL) { in ns_irq_handler()
1176 writel(readl(card->membase + CFG) & in ns_irq_handler()
1177 ~NS_CFG_EFBIE, card->membase + CFG); in ns_irq_handler()
1178 card->efbie = 0; in ns_irq_handler()
1181 NS_PRV_BUFTYPE(sb) = BUF_SM; in ns_irq_handler()
1182 skb_queue_tail(&card->sbpool.queue, sb); in ns_irq_handler()
1183 skb_reserve(sb, NS_AAL0_HEADER); in ns_irq_handler()
1184 push_rxbufs(card, sb); in ns_irq_handler()
1186 card->sbfqc = i; in ns_irq_handler()
1195 writel(NS_STAT_LFBQE, card->membase + STAT); in ns_irq_handler()
1197 card->index); in ns_irq_handler()
1198 for (i = 0; i < card->lbnr.min; i++) { in ns_irq_handler()
1201 writel(readl(card->membase + CFG) & in ns_irq_handler()
1202 ~NS_CFG_EFBIE, card->membase + CFG); in ns_irq_handler()
1203 card->efbie = 0; in ns_irq_handler()
1207 skb_queue_tail(&card->lbpool.queue, lb); in ns_irq_handler()
1211 card->lbfqc = i; in ns_irq_handler()
1217 writel(NS_STAT_RSQAF, card->membase + STAT); in ns_irq_handler()
1218 RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); in ns_irq_handler()
1222 spin_unlock_irqrestore(&card->int_lock, flags); in ns_irq_handler()
1223 PRINTK("nicstar%d: end of interrupt service\n", card->index); in ns_irq_handler()
1241 short vpi = vcc->vpi; in ns_open()
1242 int vci = vcc->vci; in ns_open()
1244 card = (ns_dev *) vcc->dev->dev_data; in ns_open()
1245 PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int)vpi, in ns_open()
1247 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { in ns_open()
1248 PRINTK("nicstar%d: unsupported AAL.\n", card->index); in ns_open()
1249 return -EINVAL; in ns_open()
1252 vc = &(card->vcmap[vpi << card->vcibits | vci]); in ns_open()
1253 vcc->dev_data = vc; in ns_open()
1256 if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) in ns_open()
1258 if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) in ns_open()
1261 printk("nicstar%d: %s vci already in use.\n", card->index, in ns_open()
1263 return -EINVAL; in ns_open()
1266 set_bit(ATM_VF_ADDR, &vcc->flags); in ns_open()
1271 if (!test_bit(ATM_VF_PARTIAL, &vcc->flags)) { in ns_open()
1274 set_bit(ATM_VF_PARTIAL, &vcc->flags); in ns_open()
1275 if (vcc->qos.txtp.traffic_class == ATM_CBR) { in ns_open()
1277 if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 in ns_open()
1278 && vcc->qos.txtp.min_pcr == 0) { in ns_open()
1281 card->index); in ns_open()
1282 clear_bit(ATM_VF_PARTIAL, &vcc->flags); in ns_open()
1283 clear_bit(ATM_VF_ADDR, &vcc->flags); in ns_open()
1284 return -EINVAL; in ns_open()
1287 tcr = atm_pcr_goal(&(vcc->qos.txtp)); in ns_open()
1288 tcra = tcr >= 0 ? tcr : -tcr; in ns_open()
1291 card->index, vcc->qos.txtp.max_pcr); in ns_open()
1296 modl = tmpl % card->max_pcr; in ns_open()
1298 n = (int)(tmpl / card->max_pcr); in ns_open()
1304 (card->tst_free_entries - in ns_open()
1308 card->index); in ns_open()
1309 clear_bit(ATM_VF_PARTIAL, &vcc->flags); in ns_open()
1310 clear_bit(ATM_VF_ADDR, &vcc->flags); in ns_open()
1311 return -EINVAL; in ns_open()
1318 card->index); in ns_open()
1319 clear_bit(ATM_VF_PARTIAL, &vcc->flags); in ns_open()
1320 clear_bit(ATM_VF_ADDR, &vcc->flags); in ns_open()
1321 return -EINVAL; in ns_open()
1324 if (n > (card->tst_free_entries - NS_TST_RESERVED)) { in ns_open()
1327 card->index); in ns_open()
1328 clear_bit(ATM_VF_PARTIAL, &vcc->flags); in ns_open()
1329 clear_bit(ATM_VF_ADDR, &vcc->flags); in ns_open()
1330 return -EINVAL; in ns_open()
1332 card->tst_free_entries -= n; in ns_open()
1335 card->index, n); in ns_open()
1337 if (card->scd2vc[frscdi] == NULL) { in ns_open()
1338 card->scd2vc[frscdi] = vc; in ns_open()
1345 card->index); in ns_open()
1346 card->tst_free_entries += n; in ns_open()
1347 clear_bit(ATM_VF_PARTIAL, &vcc->flags); in ns_open()
1348 clear_bit(ATM_VF_ADDR, &vcc->flags); in ns_open()
1349 return -EBUSY; in ns_open()
1352 vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; in ns_open()
1354 scq = get_scq(card, CBR_SCQSIZE, vc->cbr_scd); in ns_open()
1357 card->index); in ns_open()
1358 card->scd2vc[frscdi] = NULL; in ns_open()
1359 card->tst_free_entries += n; in ns_open()
1360 clear_bit(ATM_VF_PARTIAL, &vcc->flags); in ns_open()
1361 clear_bit(ATM_VF_ADDR, &vcc->flags); in ns_open()
1362 return -ENOMEM; in ns_open()
1364 vc->scq = scq; in ns_open()
1365 u32d[0] = scq_virt_to_bus(scq, scq->base); in ns_open()
1369 ns_write_sram(card, vc->cbr_scd, u32d, 4); in ns_open()
1372 } else if (vcc->qos.txtp.traffic_class == ATM_UBR) { in ns_open()
1373 vc->cbr_scd = 0x00000000; in ns_open()
1374 vc->scq = card->scq0; in ns_open()
1377 if (vcc->qos.txtp.traffic_class != ATM_NONE) { in ns_open()
1378 vc->tx = 1; in ns_open()
1379 vc->tx_vcc = vcc; in ns_open()
1380 vc->tbd_count = 0; in ns_open()
1382 if (vcc->qos.rxtp.traffic_class != ATM_NONE) { in ns_open()
1385 vc->rx = 1; in ns_open()
1386 vc->rx_vcc = vcc; in ns_open()
1387 vc->rx_iov = NULL; in ns_open()
1390 if (vcc->qos.aal == ATM_AAL5) in ns_open()
1392 else /* vcc->qos.aal == ATM_AAL0 */ in ns_open()
1399 (vpi << card->vcibits | vci) * in ns_open()
1405 set_bit(ATM_VF_READY, &vcc->flags); in ns_open()
1416 vc = vcc->dev_data; in ns_close()
1417 card = vcc->dev->dev_data; in ns_close()
1418 PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, in ns_close()
1419 (int)vcc->vpi, vcc->vci); in ns_close()
1421 clear_bit(ATM_VF_READY, &vcc->flags); in ns_close()
1423 if (vcc->qos.rxtp.traffic_class != ATM_NONE) { in ns_close()
1429 (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; in ns_close()
1430 spin_lock_irqsave(&card->res_lock, flags); in ns_close()
1433 card->membase + CMD); in ns_close()
1434 spin_unlock_irqrestore(&card->res_lock, flags); in ns_close()
1436 vc->rx = 0; in ns_close()
1437 if (vc->rx_iov != NULL) { in ns_close()
1441 stat = readl(card->membase + STAT); in ns_close()
1442 card->sbfqc = ns_stat_sfbqc_get(stat); in ns_close()
1443 card->lbfqc = ns_stat_lfbqc_get(stat); in ns_close()
1447 card->index); in ns_close()
1448 iovb = vc->rx_iov; in ns_close()
1449 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, in ns_close()
1452 spin_lock_irqsave(&card->int_lock, flags); in ns_close()
1454 spin_unlock_irqrestore(&card->int_lock, flags); in ns_close()
1455 vc->rx_iov = NULL; in ns_close()
1459 if (vcc->qos.txtp.traffic_class != ATM_NONE) { in ns_close()
1460 vc->tx = 0; in ns_close()
1463 if (vcc->qos.txtp.traffic_class == ATM_CBR) { in ns_close()
1468 scq = vc->scq; in ns_close()
1471 spin_lock_irqsave(&scq->lock, flags); in ns_close()
1472 scqep = scq->next; in ns_close()
1473 if (scqep == scq->base) in ns_close()
1474 scqep = scq->last; in ns_close()
1476 scqep--; in ns_close()
1477 if (scqep == scq->tail) { in ns_close()
1478 spin_unlock_irqrestore(&scq->lock, flags); in ns_close()
1483 if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) { in ns_close()
1490 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; in ns_close()
1491 scqi = scq->next - scq->base; in ns_close()
1495 *scq->next = tsr; in ns_close()
1497 scq->skb[index] = NULL; in ns_close()
1498 if (scq->next == scq->last) in ns_close()
1499 scq->next = scq->base; in ns_close()
1501 scq->next++; in ns_close()
1502 data = scq_virt_to_bus(scq, scq->next); in ns_close()
1503 ns_write_sram(card, scq->scd, &data, 1); in ns_close()
1505 spin_unlock_irqrestore(&scq->lock, flags); in ns_close()
1512 if (card->tste2vc[i] == vc) { in ns_close()
1513 ns_write_sram(card, card->tst_addr + i, &data, in ns_close()
1515 card->tste2vc[i] = NULL; in ns_close()
1516 card->tst_free_entries++; in ns_close()
1520 card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; in ns_close()
1521 free_scq(card, vc->scq, vcc); in ns_close()
1525 if (vcc->qos.txtp.traffic_class != ATM_NONE) { in ns_close()
1527 scq_info *scq = card->scq0; in ns_close()
1529 spin_lock_irqsave(&scq->lock, flags); in ns_close()
1531 for (i = 0; i < scq->num_entries; i++) { in ns_close()
1532 if (scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { in ns_close()
1533 ATM_SKB(scq->skb[i])->vcc = NULL; in ns_close()
1534 atm_return(vcc, scq->skb[i]->truesize); in ns_close()
1540 spin_unlock_irqrestore(&scq->lock, flags); in ns_close()
1543 vcc->dev_data = NULL; in ns_close()
1544 clear_bit(ATM_VF_PARTIAL, &vcc->flags); in ns_close()
1545 clear_bit(ATM_VF_ADDR, &vcc->flags); in ns_close()
1550 stat = readl(card->membase + STAT); in ns_close()
1551 cfg = readl(card->membase + CFG); in ns_close()
1555 card->tsq.base, card->tsq.next, in ns_close()
1556 card->tsq.last, readl(card->membase + TSQT)); in ns_close()
1559 card->rsq.base, card->rsq.next, in ns_close()
1560 card->rsq.last, readl(card->membase + RSQT)); in ns_close()
1562 card->efbie ? "enabled" : "disabled"); in ns_close()
1564 ns_stat_sfbqc_get(stat), card->sbpool.count, in ns_close()
1565 ns_stat_lfbqc_get(stat), card->lbpool.count); in ns_close()
1567 card->hbpool.count, card->iovpool.count); in ns_close()
1583 new_tst = card->tst_addr; in fill_tst()
1588 if (card->tste2vc[e] == NULL) in fill_tst()
1592 printk("nicstar%d: No free TST entries found. \n", card->index); in fill_tst()
1598 data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); in fill_tst()
1601 if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) { in fill_tst()
1602 card->tste2vc[e] = vc; in fill_tst()
1604 cl -= NS_TST_NUM_ENTRIES; in fill_tst()
1605 r--; in fill_tst()
1618 ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); in fill_tst()
1619 card->tst_addr = new_tst; in fill_tst()
1631 card = vcc->dev->dev_data; in _ns_send()
1632 TXPRINTK("nicstar%d: ns_send() called.\n", card->index); in _ns_send()
1633 if ((vc = (vc_map *) vcc->dev_data) == NULL) { in _ns_send()
1634 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", in _ns_send()
1635 card->index); in _ns_send()
1636 atomic_inc(&vcc->stats->tx_err); in _ns_send()
1638 return -EINVAL; in _ns_send()
1641 if (!vc->tx) { in _ns_send()
1642 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", in _ns_send()
1643 card->index); in _ns_send()
1644 atomic_inc(&vcc->stats->tx_err); in _ns_send()
1646 return -EINVAL; in _ns_send()
1649 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { in _ns_send()
1651 card->index); in _ns_send()
1652 atomic_inc(&vcc->stats->tx_err); in _ns_send()
1654 return -EINVAL; in _ns_send()
1657 if (skb_shinfo(skb)->nr_frags != 0) { in _ns_send()
1658 printk("nicstar%d: No scatter-gather yet.\n", card->index); in _ns_send()
1659 atomic_inc(&vcc->stats->tx_err); in _ns_send()
1661 return -EINVAL; in _ns_send()
1664 ATM_SKB(skb)->vcc = vcc; in _ns_send()
1666 NS_PRV_DMA(skb) = dma_map_single(&card->pcidev->dev, skb->data, in _ns_send()
1667 skb->len, DMA_TO_DEVICE); in _ns_send()
1669 if (vcc->qos.aal == ATM_AAL5) { in _ns_send()
1670 buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ in _ns_send()
1673 scqe.word_3 = cpu_to_le32(skb->len); in _ns_send()
1675 ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, in _ns_send()
1676 ATM_SKB(skb)-> in _ns_send()
1679 } else { /* (vcc->qos.aal == ATM_AAL0) */ in _ns_send()
1685 if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ in _ns_send()
1688 cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); in _ns_send()
1691 cpu_to_le32((((u32) vcc-> in _ns_send()
1692 vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc-> in _ns_send()
1697 if (vcc->qos.txtp.traffic_class == ATM_CBR) { in _ns_send()
1699 scq = ((vc_map *) vcc->dev_data)->scq; in _ns_send()
1703 scq = card->scq0; in _ns_send()
1707 atomic_inc(&vcc->stats->tx_err); in _ns_send()
1708 dma_unmap_single(&card->pcidev->dev, NS_PRV_DMA(skb), skb->len, in _ns_send()
1711 return -EIO; in _ns_send()
1713 atomic_inc(&vcc->stats->tx); in _ns_send()
1738 spin_lock_irqsave(&scq->lock, flags); in push_scqe()
1739 while (scq->tail == scq->next) { in push_scqe()
1741 spin_unlock_irqrestore(&scq->lock, flags); in push_scqe()
1742 printk("nicstar%d: Error pushing TBD.\n", card->index); in push_scqe()
1746 scq->full = 1; in push_scqe()
1747 wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq, in push_scqe()
1748 scq->tail != scq->next, in push_scqe()
1749 scq->lock, in push_scqe()
1752 if (scq->full) { in push_scqe()
1753 spin_unlock_irqrestore(&scq->lock, flags); in push_scqe()
1755 card->index); in push_scqe()
1759 *scq->next = *tbd; in push_scqe()
1760 index = (int)(scq->next - scq->base); in push_scqe()
1761 scq->skb[index] = skb; in push_scqe()
1763 card->index, skb, index); in push_scqe()
1765 card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), in push_scqe()
1766 le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), in push_scqe()
1767 scq->next); in push_scqe()
1768 if (scq->next == scq->last) in push_scqe()
1769 scq->next = scq->base; in push_scqe()
1771 scq->next++; in push_scqe()
1773 vc->tbd_count++; in push_scqe()
1774 if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) { in push_scqe()
1775 scq->tbd_count++; in push_scqe()
1780 if (vc->tbd_count >= MAX_TBD_PER_VC in push_scqe()
1781 || scq->tbd_count >= MAX_TBD_PER_SCQ) { in push_scqe()
1784 while (scq->tail == scq->next) { in push_scqe()
1786 data = scq_virt_to_bus(scq, scq->next); in push_scqe()
1787 ns_write_sram(card, scq->scd, &data, 1); in push_scqe()
1788 spin_unlock_irqrestore(&scq->lock, flags); in push_scqe()
1790 card->index); in push_scqe()
1794 scq->full = 1; in push_scqe()
1797 wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq, in push_scqe()
1798 scq->tail != scq->next, in push_scqe()
1799 scq->lock, in push_scqe()
1803 if (!scq->full) { in push_scqe()
1808 scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; in push_scqe()
1809 scqi = scq->next - scq->base; in push_scqe()
1814 *scq->next = tsr; in push_scqe()
1816 scq->skb[index] = NULL; in push_scqe()
1819 card->index, le32_to_cpu(tsr.word_1), in push_scqe()
1821 le32_to_cpu(tsr.word_4), scq->next); in push_scqe()
1822 if (scq->next == scq->last) in push_scqe()
1823 scq->next = scq->base; in push_scqe()
1825 scq->next++; in push_scqe()
1826 vc->tbd_count = 0; in push_scqe()
1827 scq->tbd_count = 0; in push_scqe()
1830 card->index); in push_scqe()
1832 data = scq_virt_to_bus(scq, scq->next); in push_scqe()
1833 ns_write_sram(card, scq->scd, &data, 1); in push_scqe()
1835 spin_unlock_irqrestore(&scq->lock, flags); in push_scqe()
1849 if (card->tsq.next == card->tsq.last) in process_tsq()
1850 one_ahead = card->tsq.base; in process_tsq()
1852 one_ahead = card->tsq.next + 1; in process_tsq()
1854 if (one_ahead == card->tsq.last) in process_tsq()
1855 two_ahead = card->tsq.base; in process_tsq()
1859 while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || in process_tsq()
1866 while (ns_tsi_isempty(card->tsq.next)) { in process_tsq()
1867 if (card->tsq.next == card->tsq.last) in process_tsq()
1868 card->tsq.next = card->tsq.base; in process_tsq()
1870 card->tsq.next++; in process_tsq()
1873 if (!ns_tsi_tmrof(card->tsq.next)) { in process_tsq()
1874 scdi = ns_tsi_getscdindex(card->tsq.next); in process_tsq()
1876 scq = card->scq0; in process_tsq()
1878 if (card->scd2vc[scdi] == NULL) { in process_tsq()
1881 card->index); in process_tsq()
1882 ns_tsi_init(card->tsq.next); in process_tsq()
1885 scq = card->scd2vc[scdi]->scq; in process_tsq()
1887 drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); in process_tsq()
1888 scq->full = 0; in process_tsq()
1889 wake_up_interruptible(&(scq->scqfull_waitq)); in process_tsq()
1892 ns_tsi_init(card->tsq.next); in process_tsq()
1893 previous = card->tsq.next; in process_tsq()
1894 if (card->tsq.next == card->tsq.last) in process_tsq()
1895 card->tsq.next = card->tsq.base; in process_tsq()
1897 card->tsq.next++; in process_tsq()
1899 if (card->tsq.next == card->tsq.last) in process_tsq()
1900 one_ahead = card->tsq.base; in process_tsq()
1902 one_ahead = card->tsq.next + 1; in process_tsq()
1904 if (one_ahead == card->tsq.last) in process_tsq()
1905 two_ahead = card->tsq.base; in process_tsq()
1911 writel(PTR_DIFF(previous, card->tsq.base), in process_tsq()
1912 card->membase + TSQH); in process_tsq()
1923 card->index, scq, pos); in drain_scq()
1924 if (pos >= scq->num_entries) { in drain_scq()
1925 printk("nicstar%d: Bad index on drain_scq().\n", card->index); in drain_scq()
1929 spin_lock_irqsave(&scq->lock, flags); in drain_scq()
1930 i = (int)(scq->tail - scq->base); in drain_scq()
1931 if (++i == scq->num_entries) in drain_scq()
1934 skb = scq->skb[i]; in drain_scq()
1936 card->index, skb, i); in drain_scq()
1938 dma_unmap_single(&card->pcidev->dev, in drain_scq()
1940 skb->len, in drain_scq()
1942 vcc = ATM_SKB(skb)->vcc; in drain_scq()
1943 if (vcc && vcc->pop != NULL) { in drain_scq()
1944 vcc->pop(vcc, skb); in drain_scq()
1948 scq->skb[i] = NULL; in drain_scq()
1950 if (++i == scq->num_entries) in drain_scq()
1953 scq->tail = scq->base + pos; in drain_scq()
1954 spin_unlock_irqrestore(&scq->lock, flags); in drain_scq()
1961 if (!ns_rsqe_valid(card->rsq.next)) in process_rsq()
1964 dequeue_rx(card, card->rsq.next); in process_rsq()
1965 ns_rsqe_init(card->rsq.next); in process_rsq()
1966 previous = card->rsq.next; in process_rsq()
1967 if (card->rsq.next == card->rsq.last) in process_rsq()
1968 card->rsq.next = card->rsq.base; in process_rsq()
1970 card->rsq.next++; in process_rsq()
1971 } while (ns_rsqe_valid(card->rsq.next)); in process_rsq()
1972 writel(PTR_DIFF(previous, card->rsq.base), card->membase + RSQH); in process_rsq()
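
process_rsq() above is a standard status-ring consumer: drain entries while their valid bit is set, re-initialise each one, remember the last entry handled, and finally write that entry's byte offset back to the adapter (RSQH) so the hardware knows how far software has caught up. A stripped-down sketch of the loop, with hypothetical demo_* names and callbacks standing in for ns_rsqe_valid(), dequeue_rx()/ns_rsqe_init() and the RSQH write:

#include <linux/types.h>

/* Illustrative only: generic fixed-size status-ring consumer. */
struct demo_ring {
	void  *base;		/* first entry                    */
	void  *last;		/* last entry (inclusive)         */
	void  *next;		/* next entry software will check */
	size_t entry_size;
};

static void demo_drain_ring(struct demo_ring *r,
			    int (*valid)(void *entry),
			    void (*consume)(void *entry),
			    void (*publish_head)(u32 byte_offset))
{
	void *previous;

	if (!valid(r->next))
		return;				/* nothing new from the card */
	do {
		consume(r->next);		/* process and re-initialise */
		previous = r->next;
		if (r->next == r->last)		/* wrap around the ring */
			r->next = r->base;
		else
			r->next = (char *)r->next + r->entry_size;
	} while (valid(r->next));

	/* tell the adapter how far software has caught up */
	publish_head((u32)((char *)previous - (char *)r->base));
}
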
1988 stat = readl(card->membase + STAT); in dequeue_rx()
1989 card->sbfqc = ns_stat_sfbqc_get(stat); in dequeue_rx()
1990 card->lbfqc = ns_stat_lfbqc_get(stat); in dequeue_rx()
1992 id = le32_to_cpu(rsqe->buffer_handle); in dequeue_rx()
1993 skb = idr_remove(&card->idr, id); in dequeue_rx()
1996 "nicstar%d: skb not found!\n", card->index); in dequeue_rx()
1999 dma_sync_single_for_cpu(&card->pcidev->dev, in dequeue_rx()
2004 dma_unmap_single(&card->pcidev->dev, in dequeue_rx()
2011 if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) { in dequeue_rx()
2012 printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", in dequeue_rx()
2013 card->index, vpi, vci); in dequeue_rx()
2018 vc = &(card->vcmap[vpi << card->vcibits | vci]); in dequeue_rx()
2019 if (!vc->rx) { in dequeue_rx()
2020 RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", in dequeue_rx()
2021 card->index, vpi, vci); in dequeue_rx()
2026 vcc = vc->rx_vcc; in dequeue_rx()
2028 if (vcc->qos.aal == ATM_AAL0) { in dequeue_rx()
2029 struct sk_buff *sb; in dequeue_rx() local
2033 cell = skb->data; in dequeue_rx()
2034 for (i = ns_rsqe_cellcount(rsqe); i; i--) { in dequeue_rx()
2035 sb = dev_alloc_skb(NS_SMSKBSIZE); in dequeue_rx()
2036 if (!sb) { in dequeue_rx()
2039 card->index); in dequeue_rx()
2040 atomic_add(i, &vcc->stats->rx_drop); in dequeue_rx()
2043 if (!atm_charge(vcc, sb->truesize)) { in dequeue_rx()
2046 card->index); in dequeue_rx()
2047 atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */ in dequeue_rx()
2048 dev_kfree_skb_any(sb); in dequeue_rx()
2052 *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | in dequeue_rx()
2055 *((u32 *) sb->data) |= 0x00000002; in dequeue_rx()
2056 skb_put(sb, NS_AAL0_HEADER); in dequeue_rx()
2057 memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); in dequeue_rx()
2058 skb_put(sb, ATM_CELL_PAYLOAD); in dequeue_rx()
2059 ATM_SKB(sb)->vcc = vcc; in dequeue_rx()
2060 __net_timestamp(sb); in dequeue_rx()
2061 vcc->push(vcc, sb); in dequeue_rx()
2062 atomic_inc(&vcc->stats->rx); in dequeue_rx()
2072 if ((iovb = vc->rx_iov) == NULL) { in dequeue_rx()
2073 iovb = skb_dequeue(&(card->iovpool.queue)); in dequeue_rx()
2078 card->index); in dequeue_rx()
2079 atomic_inc(&vcc->stats->rx_drop); in dequeue_rx()
2084 } else if (--card->iovpool.count < card->iovnr.min) { in dequeue_rx()
2089 skb_queue_tail(&card->iovpool.queue, new_iovb); in dequeue_rx()
2090 card->iovpool.count++; in dequeue_rx()
2093 vc->rx_iov = iovb; in dequeue_rx()
2095 iovb->len = 0; in dequeue_rx()
2096 iovb->data = iovb->head; in dequeue_rx()
2102 printk("nicstar%d: received too big AAL5 SDU.\n", card->index); in dequeue_rx()
2103 atomic_inc(&vcc->stats->rx_err); in dequeue_rx()
2104 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, in dequeue_rx()
2107 iovb->len = 0; in dequeue_rx()
2108 iovb->data = iovb->head; in dequeue_rx()
2111 iov = &((struct iovec *)iovb->data)[NS_PRV_IOVCNT(iovb)++]; in dequeue_rx()
2112 iov->iov_base = (void *)skb; in dequeue_rx()
2113 iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; in dequeue_rx()
2114 iovb->len += iov->iov_len; in dequeue_rx()
2121 card->index); in dequeue_rx()
2123 atomic_inc(&vcc->stats->rx_err); in dequeue_rx()
2125 vc->rx_iov = NULL; in dequeue_rx()
2134 card->index); in dequeue_rx()
2136 atomic_inc(&vcc->stats->rx_err); in dequeue_rx()
2137 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, in dequeue_rx()
2139 vc->rx_iov = NULL; in dequeue_rx()
2149 (skb->data + iov->iov_len - 6); in dequeue_rx()
2153 len + 8 > iovb->len || len + (47 + 8) < iovb->len) { in dequeue_rx()
2154 printk("nicstar%d: AAL5 CRC error", card->index); in dequeue_rx()
2155 if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) in dequeue_rx()
2156 printk(" - PDU size mismatch.\n"); in dequeue_rx()
2159 atomic_inc(&vcc->stats->rx_err); in dequeue_rx()
2160 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data, in dequeue_rx()
2162 vc->rx_iov = NULL; in dequeue_rx()
2171 if (!atm_charge(vcc, skb->truesize)) { in dequeue_rx()
2173 atomic_inc(&vcc->stats->rx_drop); in dequeue_rx()
2177 ATM_SKB(skb)->vcc = vcc; in dequeue_rx()
2179 vcc->push(vcc, skb); in dequeue_rx()
2180 atomic_inc(&vcc->stats->rx); in dequeue_rx()
2183 struct sk_buff *sb; in dequeue_rx() local
2185 sb = (struct sk_buff *)(iov - 1)->iov_base; in dequeue_rx()
2189 if (!atm_charge(vcc, sb->truesize)) { in dequeue_rx()
2190 push_rxbufs(card, sb); in dequeue_rx()
2191 atomic_inc(&vcc->stats->rx_drop); in dequeue_rx()
2193 skb_put(sb, len); in dequeue_rx()
2194 dequeue_sm_buf(card, sb); in dequeue_rx()
2195 ATM_SKB(sb)->vcc = vcc; in dequeue_rx()
2196 __net_timestamp(sb); in dequeue_rx()
2197 vcc->push(vcc, sb); in dequeue_rx()
2198 atomic_inc(&vcc->stats->rx); in dequeue_rx()
2205 if (!atm_charge(vcc, skb->truesize)) { in dequeue_rx()
2207 atomic_inc(&vcc->stats->rx_drop); in dequeue_rx()
2211 skb_copy_from_linear_data(sb, skb->data, in dequeue_rx()
2213 skb_put(skb, len - NS_SMBUFSIZE); in dequeue_rx()
2214 ATM_SKB(skb)->vcc = vcc; in dequeue_rx()
2216 vcc->push(vcc, skb); in dequeue_rx()
2217 atomic_inc(&vcc->stats->rx); in dequeue_rx()
2220 push_rxbufs(card, sb); in dequeue_rx()
2226 struct sk_buff *hb, *sb, *lb; in dequeue_rx() local
2230 hb = skb_dequeue(&(card->hbpool.queue)); in dequeue_rx()
2237 card->index); in dequeue_rx()
2238 atomic_inc(&vcc->stats->rx_drop); in dequeue_rx()
2241 iovb->data, in dequeue_rx()
2243 vc->rx_iov = NULL; in dequeue_rx()
2246 } else if (card->hbpool.count < card->hbnr.min) { in dequeue_rx()
2251 skb_queue_tail(&card->hbpool. in dequeue_rx()
2253 card->hbpool.count++; in dequeue_rx()
2257 } else if (--card->hbpool.count < card->hbnr.min) { in dequeue_rx()
2262 skb_queue_tail(&card->hbpool.queue, in dequeue_rx()
2264 card->hbpool.count++; in dequeue_rx()
2266 if (card->hbpool.count < card->hbnr.min) { in dequeue_rx()
2272 skb_queue_tail(&card->hbpool. in dequeue_rx()
2274 card->hbpool.count++; in dequeue_rx()
2279 iov = (struct iovec *)iovb->data; in dequeue_rx()
2281 if (!atm_charge(vcc, hb->truesize)) { in dequeue_rx()
2284 if (card->hbpool.count < card->hbnr.max) { in dequeue_rx()
2285 skb_queue_tail(&card->hbpool.queue, hb); in dequeue_rx()
2286 card->hbpool.count++; in dequeue_rx()
2289 atomic_inc(&vcc->stats->rx_drop); in dequeue_rx()
2292 sb = (struct sk_buff *)iov->iov_base; in dequeue_rx()
2293 skb_copy_from_linear_data(sb, hb->data, in dequeue_rx()
2294 iov->iov_len); in dequeue_rx()
2295 skb_put(hb, iov->iov_len); in dequeue_rx()
2296 remaining = len - iov->iov_len; in dequeue_rx()
2299 push_rxbufs(card, sb); in dequeue_rx()
2303 lb = (struct sk_buff *)iov->iov_base; in dequeue_rx()
2305 min_t(int, remaining, iov->iov_len); in dequeue_rx()
2311 remaining -= tocopy; in dequeue_rx()
2315 if (remaining != 0 || hb->len != len) in dequeue_rx()
2318 card->index); in dequeue_rx()
2320 ATM_SKB(hb)->vcc = vcc; in dequeue_rx()
2322 vcc->push(vcc, hb); in dequeue_rx()
2323 atomic_inc(&vcc->stats->rx); in dequeue_rx()
2327 vc->rx_iov = NULL; in dequeue_rx()
2337 card->index); in recycle_rx_buf()
2345 while (count-- > 0) in recycle_iovec_rx_bufs()
2346 recycle_rx_buf(card, (struct sk_buff *)(iov++)->iov_base); in recycle_iovec_rx_bufs()
2351 if (card->iovpool.count < card->iovnr.max) { in recycle_iov_buf()
2352 skb_queue_tail(&card->iovpool.queue, iovb); in recycle_iov_buf()
2353 card->iovpool.count++; in recycle_iov_buf()
2358 static void dequeue_sm_buf(ns_dev * card, struct sk_buff *sb) in dequeue_sm_buf() argument
2360 skb_unlink(sb, &card->sbpool.queue); in dequeue_sm_buf()
2361 if (card->sbfqc < card->sbnr.init) { in dequeue_sm_buf()
2365 skb_queue_tail(&card->sbpool.queue, new_sb); in dequeue_sm_buf()
2370 if (card->sbfqc < card->sbnr.init) in dequeue_sm_buf()
2375 skb_queue_tail(&card->sbpool.queue, new_sb); in dequeue_sm_buf()
2384 skb_unlink(lb, &card->lbpool.queue); in dequeue_lg_buf()
2385 if (card->lbfqc < card->lbnr.init) { in dequeue_lg_buf()
2389 skb_queue_tail(&card->lbpool.queue, new_lb); in dequeue_lg_buf()
2394 if (card->lbfqc < card->lbnr.init) in dequeue_lg_buf()
2399 skb_queue_tail(&card->lbpool.queue, new_lb); in dequeue_lg_buf()
2413 card = (ns_dev *) dev->dev_data; in ns_proc_read()
2414 stat = readl(card->membase + STAT); in ns_proc_read()
2415 if (!left--) in ns_proc_read()
2417 if (!left--) in ns_proc_read()
2419 ns_stat_sfbqc_get(stat), card->sbnr.min, in ns_proc_read()
2420 card->sbnr.init, card->sbnr.max); in ns_proc_read()
2421 if (!left--) in ns_proc_read()
2423 ns_stat_lfbqc_get(stat), card->lbnr.min, in ns_proc_read()
2424 card->lbnr.init, card->lbnr.max); in ns_proc_read()
2425 if (!left--) in ns_proc_read()
2427 card->hbpool.count, card->hbnr.min, in ns_proc_read()
2428 card->hbnr.init, card->hbnr.max); in ns_proc_read()
2429 if (!left--) in ns_proc_read()
2431 card->iovpool.count, card->iovnr.min, in ns_proc_read()
2432 card->iovnr.init, card->iovnr.max); in ns_proc_read()
2433 if (!left--) { in ns_proc_read()
2436 sprintf(page, "Interrupt counter: %u \n", card->intcnt); in ns_proc_read()
2437 card->intcnt = 0; in ns_proc_read()
2444 if (card->max_pcr == ATM_25_PCR && !left--) { in ns_proc_read()
2451 card->membase + CMD); in ns_proc_read()
2453 phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; in ns_proc_read()
2460 #endif /* 0 - Dump 25.6 Mbps PHY registers */ in ns_proc_read()
2463 if (left-- < NS_TST_NUM_ENTRIES) { in ns_proc_read()
2464 if (card->tste2vc[left + 1] == NULL) in ns_proc_read()
2465 return sprintf(page, "%5d - VBR/UBR \n", left + 1); in ns_proc_read()
2467 return sprintf(page, "%5d - %d %d \n", left + 1, in ns_proc_read()
2468 card->tste2vc[left + 1]->tx_vcc->vpi, in ns_proc_read()
2469 card->tste2vc[left + 1]->tx_vcc->vci); in ns_proc_read()
2482 card = dev->dev_data; in ns_ioctl()
2486 (pl.buftype, &((pool_levels __user *) arg)->buftype)) in ns_ioctl()
2487 return -EFAULT; in ns_ioctl()
2491 ns_stat_sfbqc_get(readl(card->membase + STAT)); in ns_ioctl()
2492 pl.level.min = card->sbnr.min; in ns_ioctl()
2493 pl.level.init = card->sbnr.init; in ns_ioctl()
2494 pl.level.max = card->sbnr.max; in ns_ioctl()
2499 ns_stat_lfbqc_get(readl(card->membase + STAT)); in ns_ioctl()
2500 pl.level.min = card->lbnr.min; in ns_ioctl()
2501 pl.level.init = card->lbnr.init; in ns_ioctl()
2502 pl.level.max = card->lbnr.max; in ns_ioctl()
2506 pl.count = card->hbpool.count; in ns_ioctl()
2507 pl.level.min = card->hbnr.min; in ns_ioctl()
2508 pl.level.init = card->hbnr.init; in ns_ioctl()
2509 pl.level.max = card->hbnr.max; in ns_ioctl()
2513 pl.count = card->iovpool.count; in ns_ioctl()
2514 pl.level.min = card->iovnr.min; in ns_ioctl()
2515 pl.level.init = card->iovnr.init; in ns_ioctl()
2516 pl.level.max = card->iovnr.max; in ns_ioctl()
2520 return -ENOIOCTLCMD; in ns_ioctl()
2526 return -EFAULT; in ns_ioctl()
2530 return -EPERM; in ns_ioctl()
2532 return -EFAULT; in ns_ioctl()
2535 return -EINVAL; in ns_ioctl()
2537 return -EINVAL; in ns_ioctl()
2541 return -EINVAL; in ns_ioctl()
2542 card->sbnr.min = pl.level.min; in ns_ioctl()
2543 card->sbnr.init = pl.level.init; in ns_ioctl()
2544 card->sbnr.max = pl.level.max; in ns_ioctl()
2549 return -EINVAL; in ns_ioctl()
2550 card->lbnr.min = pl.level.min; in ns_ioctl()
2551 card->lbnr.init = pl.level.init; in ns_ioctl()
2552 card->lbnr.max = pl.level.max; in ns_ioctl()
2557 return -EINVAL; in ns_ioctl()
2558 card->hbnr.min = pl.level.min; in ns_ioctl()
2559 card->hbnr.init = pl.level.init; in ns_ioctl()
2560 card->hbnr.max = pl.level.max; in ns_ioctl()
2565 return -EINVAL; in ns_ioctl()
2566 card->iovnr.min = pl.level.min; in ns_ioctl()
2567 card->iovnr.init = pl.level.init; in ns_ioctl()
2568 card->iovnr.max = pl.level.max; in ns_ioctl()
2572 return -EINVAL; in ns_ioctl()
2579 return -EPERM; in ns_ioctl()
2583 while (card->sbfqc < card->sbnr.init) { in ns_ioctl()
2584 struct sk_buff *sb; in ns_ioctl() local
2586 sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); in ns_ioctl()
2587 if (sb == NULL) in ns_ioctl()
2588 return -ENOMEM; in ns_ioctl()
2589 NS_PRV_BUFTYPE(sb) = BUF_SM; in ns_ioctl()
2590 skb_queue_tail(&card->sbpool.queue, sb); in ns_ioctl()
2591 skb_reserve(sb, NS_AAL0_HEADER); in ns_ioctl()
2592 push_rxbufs(card, sb); in ns_ioctl()
2597 while (card->lbfqc < card->lbnr.init) { in ns_ioctl()
2602 return -ENOMEM; in ns_ioctl()
2604 skb_queue_tail(&card->lbpool.queue, lb); in ns_ioctl()
2611 while (card->hbpool.count > card->hbnr.init) { in ns_ioctl()
2614 spin_lock_irqsave(&card->int_lock, flags); in ns_ioctl()
2615 hb = skb_dequeue(&card->hbpool.queue); in ns_ioctl()
2616 card->hbpool.count--; in ns_ioctl()
2617 spin_unlock_irqrestore(&card->int_lock, flags); in ns_ioctl()
2621 card->index); in ns_ioctl()
2626 while (card->hbpool.count < card->hbnr.init) { in ns_ioctl()
2631 return -ENOMEM; in ns_ioctl()
2633 spin_lock_irqsave(&card->int_lock, flags); in ns_ioctl()
2634 skb_queue_tail(&card->hbpool.queue, hb); in ns_ioctl()
2635 card->hbpool.count++; in ns_ioctl()
2636 spin_unlock_irqrestore(&card->int_lock, flags); in ns_ioctl()
2641 while (card->iovpool.count > card->iovnr.init) { in ns_ioctl()
2644 spin_lock_irqsave(&card->int_lock, flags); in ns_ioctl()
2645 iovb = skb_dequeue(&card->iovpool.queue); in ns_ioctl()
2646 card->iovpool.count--; in ns_ioctl()
2647 spin_unlock_irqrestore(&card->int_lock, flags); in ns_ioctl()
2651 card->index); in ns_ioctl()
2656 while (card->iovpool.count < card->iovnr.init) { in ns_ioctl()
2661 return -ENOMEM; in ns_ioctl()
2663 spin_lock_irqsave(&card->int_lock, flags); in ns_ioctl()
2664 skb_queue_tail(&card->iovpool.queue, iovb); in ns_ioctl()
2665 card->iovpool.count++; in ns_ioctl()
2666 spin_unlock_irqrestore(&card->int_lock, flags); in ns_ioctl()
2671 return -EINVAL; in ns_ioctl()
2677 if (dev->phy && dev->phy->ioctl) { in ns_ioctl()
2678 return dev->phy->ioctl(dev, cmd, arg); in ns_ioctl()
2680 printk("nicstar%d: %s == NULL \n", card->index, in ns_ioctl()
2681 dev->phy ? "dev->phy->ioctl" : "dev->phy"); in ns_ioctl()
2682 return -ENOIOCTLCMD; in ns_ioctl()
2704 if (!spin_trylock_irqsave(&card->int_lock, flags)) { in ns_poll()
2710 stat_r = readl(card->membase + STAT); in ns_poll()
2719 writel(stat_w, card->membase + STAT); in ns_poll()
2720 spin_unlock_irqrestore(&card->int_lock, flags); in ns_poll()
2732 card = dev->dev_data; in ns_phy_put()
2733 spin_lock_irqsave(&card->res_lock, flags); in ns_phy_put()
2735 writel((u32) value, card->membase + DR0); in ns_phy_put()
2737 card->membase + CMD); in ns_phy_put()
2738 spin_unlock_irqrestore(&card->res_lock, flags); in ns_phy_put()
2747 card = dev->dev_data; in ns_phy_get()
2748 spin_lock_irqsave(&card->res_lock, flags); in ns_phy_get()
2751 card->membase + CMD); in ns_phy_get()
2753 data = readl(card->membase + DR0) & 0x000000FF; in ns_phy_get()
2754 spin_unlock_irqrestore(&card->res_lock, flags); in ns_phy_get()