Lines matching "rx" and "watermark" (search query: +full:rx +full:-watermark) in the Micrel KS8842 driver, drivers/net/ethernet/micrel/ks8842.c
1 // SPDX-License-Identifier: GPL-2.0-only
23 #include <linux/dma-mapping.h>
77 /* When running via timberdale in DMA mode, the RX interrupt should be
78    enabled in the KS8842, but not in the FPGA IP, since the IP handles
79 RX DMA internally.
147 #define KS8842_USE_DMA(adapter) (((adapter)->dma_tx.channel != -1) && \
148 ((adapter)->dma_rx.channel != -1))
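The macro above is the driver's single switch between the DMA and PIO data paths: it is true only when both dma_tx.channel and dma_rx.channel were assigned something other than -1 (see ks8842_probe() and ks8842_open() further down). A minimal sketch of how such a check gates the transmit path is shown below; example_xmit_path() is hypothetical, and it assumes ks8842_tx_frame_dma()/ks8842_tx_frame() keep their int (skb, netdev) signatures. The real decision is made inside ks8842_xmit_frame().

    /* Sketch only: example_xmit_path() is not part of ks8842.c. */
    static int example_xmit_path(struct sk_buff *skb, struct net_device *netdev)
    {
            struct ks8842_adapter *adapter = netdev_priv(netdev);

            if (KS8842_USE_DMA(adapter))
                    return ks8842_tx_frame_dma(skb, netdev); /* hand off to the DMA engine */

            return ks8842_tx_frame(skb, netdev); /* program the QMU data registers directly */
    }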
168 iowrite32(1, adapter->hw_addr + REQ_TIMB_DMA_RESUME); in ks8842_resume_dma()
173 iowrite16(bank, adapter->hw_addr + REG_SELECT_BANK); in ks8842_select_bank()
180 iowrite8(value, adapter->hw_addr + offset); in ks8842_write8()
187 iowrite16(value, adapter->hw_addr + offset); in ks8842_write16()
195 reg = ioread16(adapter->hw_addr + offset); in ks8842_enable_bits()
197 iowrite16(reg, adapter->hw_addr + offset); in ks8842_enable_bits()
205 reg = ioread16(adapter->hw_addr + offset); in ks8842_clear_bits()
207 iowrite16(reg, adapter->hw_addr + offset); in ks8842_clear_bits()
214 iowrite32(value, adapter->hw_addr + offset); in ks8842_write32()
221 return ioread8(adapter->hw_addr + offset); in ks8842_read8()
228 return ioread16(adapter->hw_addr + offset); in ks8842_read16()
235 return ioread32(adapter->hw_addr + offset); in ks8842_read32()
240 if (adapter->conf_flags & MICREL_KS884X) { in ks8842_reset()
243 iowrite16(0, adapter->hw_addr + REG_GRR); in ks8842_reset()
250 iowrite16(0, adapter->hw_addr + REG_GRR); in ks8842_reset()
252 iowrite32(0x1, adapter->hw_addr + REG_TIMB_RST); in ks8842_reset()
306 /* RX frame pointer autoincrement */ in ks8842_reset_hw()
309 /* RX 2 KB high watermark */ in ks8842_reset_hw()
321 /* restart port auto-negotiation */ in ks8842_reset_hw()
335 /* When running in DMA Mode the RX interrupt is not enabled in in ks8842_reset_hw()
336 timberdale because RX data is received by DMA callbacks in ks8842_reset_hw()
337 but must still be enabled in the KS8842, since it indicates in ks8842_reset_hw()
338 to timberdale when there is RX data for its DMA FIFOs */ in ks8842_reset_hw()
339 iowrite16(ENABLED_IRQS_DMA_IP, adapter->hw_addr + REG_TIMB_IER); in ks8842_reset_hw()
342 if (!(adapter->conf_flags & MICREL_KS884X)) in ks8842_reset_hw()
344 adapter->hw_addr + REG_TIMB_IER); in ks8842_reset_hw()
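Lines 335-344 program the FPGA-level (timberdale) interrupt enable differently for the two modes: with DMA, ENABLED_IRQS_DMA_IP leaves the RX interrupt to the KS8842 itself. Below is a hedged sketch of that selection; ENABLED_IRQS_IP stands in for the non-DMA mask, whose exact name is assumed, and the helper name is not from the driver.

    /* Sketch only: ENABLED_IRQS_IP is an assumed name for the non-DMA mask. */
    static void example_enable_fpga_irqs(struct ks8842_adapter *adapter, bool use_dma)
    {
            u16 mask = use_dma ? ENABLED_IRQS_DMA_IP : ENABLED_IRQS_IP;

            /*
             * Only the timberdale (non-Micrel) variant has REG_TIMB_IER at all;
             * DMA mode implies timberdale, so the real driver writes the DMA
             * mask unconditionally in that branch.
             */
            if (!(adapter->conf_flags & MICREL_KS884X))
                    iowrite16(mask, adapter->hw_addr + REG_TIMB_IER);
    }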
358 addr[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i); in ks8842_init_mac_addr()
359 eth_hw_addr_set(adapter->netdev, addr); in ks8842_init_mac_addr()
361 if (adapter->conf_flags & MICREL_KS884X) { in ks8842_init_mac_addr()
390 spin_lock_irqsave(&adapter->lock, flags); in ks8842_write_mac_addr()
392 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i); in ks8842_write_mac_addr()
393 if (!(adapter->conf_flags & MICREL_KS884X)) in ks8842_write_mac_addr()
394 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1], in ks8842_write_mac_addr()
398 if (adapter->conf_flags & MICREL_KS884X) { in ks8842_write_mac_addr()
413 spin_unlock_irqrestore(&adapter->lock, flags); in ks8842_write_mac_addr()
424 struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; in ks8842_tx_frame_dma()
425 u8 *buf = ctl->buf; in ks8842_tx_frame_dma()
427 if (ctl->adesc) { in ks8842_tx_frame_dma()
433 sg_dma_len(&ctl->sg) = skb->len + sizeof(u32); in ks8842_tx_frame_dma()
439 *buf++ = skb->len & 0xff; in ks8842_tx_frame_dma()
440 *buf++ = (skb->len >> 8) & 0xff; in ks8842_tx_frame_dma()
441 skb_copy_from_linear_data(skb, buf, skb->len); in ks8842_tx_frame_dma()
443 dma_sync_single_range_for_device(adapter->dev, in ks8842_tx_frame_dma()
444 sg_dma_address(&ctl->sg), 0, sg_dma_len(&ctl->sg), in ks8842_tx_frame_dma()
448 if (sg_dma_len(&ctl->sg) % 4) in ks8842_tx_frame_dma()
449 sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; in ks8842_tx_frame_dma()
451 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, in ks8842_tx_frame_dma()
452 &ctl->sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); in ks8842_tx_frame_dma()
453 if (!ctl->adesc) in ks8842_tx_frame_dma()
456 ctl->adesc->callback_param = netdev; in ks8842_tx_frame_dma()
457 ctl->adesc->callback = ks8842_dma_tx_cb; in ks8842_tx_frame_dma()
458 ctl->adesc->tx_submit(ctl->adesc); in ks8842_tx_frame_dma()
460 netdev->stats.tx_bytes += skb->len; in ks8842_tx_frame_dma()
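The DMA transmit buffer built above starts with a two-byte little-endian length (file lines 439-440), followed by the linear skb data, and the scatterlist length is then rounded up to a 32-bit boundary (file lines 448-449). That round-up is equivalent to the kernel's ALIGN() helper, as this sketch (not part of the driver) shows.

    /*
     * Sketch only (not in ks8842.c): same effect as the modulo arithmetic
     * at file lines 448-449; ALIGN() comes from <linux/kernel.h>.
     */
    static inline u32 example_pad_dma_len(u32 len)
    {
            return ALIGN(len, sizeof(u32)); /* pad the transfer to a 32-bit boundary */
    }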
470 int len = skb->len; in ks8842_tx_frame()
473 __func__, skb->len, skb->head, skb->data, in ks8842_tx_frame()
480 if (adapter->conf_flags & KS884X_16BIT) { in ks8842_tx_frame()
481 u16 *ptr16 = (u16 *)skb->data; in ks8842_tx_frame()
484 netdev->stats.tx_bytes += len; in ks8842_tx_frame()
488 iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_LO); in ks8842_tx_frame()
489 iowrite16(*ptr16++, adapter->hw_addr + REG_QMU_DATA_HI); in ks8842_tx_frame()
490 len -= sizeof(u32); in ks8842_tx_frame()
494 u32 *ptr = (u32 *)skb->data; in ks8842_tx_frame()
500 netdev->stats.tx_bytes += len; in ks8842_tx_frame()
504 iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO); in ks8842_tx_frame()
505 len -= sizeof(u32); in ks8842_tx_frame()
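On the 16-bit bus variant (KS884X_16BIT) each 32-bit word of the frame is written as two halves, low register first, which is why both branches above consume len in sizeof(u32) steps. A condensed sketch of pushing one word follows; the helper name is assumed, and it mirrors the little-endian pointer walk used in the driver.

    /* Sketch only: example_push_word() is not a ks8842.c function. */
    static void example_push_word(struct ks8842_adapter *adapter, u32 word)
    {
            if (adapter->conf_flags & KS884X_16BIT) {
                    /* low half first, matching the *ptr16++ walk above (little-endian) */
                    iowrite16(word & 0xffff, adapter->hw_addr + REG_QMU_DATA_LO);
                    iowrite16(word >> 16, adapter->hw_addr + REG_QMU_DATA_HI);
            } else {
                    iowrite32(word, adapter->hw_addr + REG_QMU_DATA_LO);
            }
    }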
520 netdev_dbg(netdev, "RX error, status: %x\n", status); in ks8842_update_rx_err_counters()
522 netdev->stats.rx_errors++; in ks8842_update_rx_err_counters()
524 netdev->stats.rx_length_errors++; in ks8842_update_rx_err_counters()
526 netdev->stats.rx_crc_errors++; in ks8842_update_rx_err_counters()
528 netdev->stats.rx_frame_errors++; in ks8842_update_rx_err_counters()
534 netdev_dbg(netdev, "RX packet, len: %d\n", len); in ks8842_update_rx_counters()
536 netdev->stats.rx_packets++; in ks8842_update_rx_counters()
537 netdev->stats.rx_bytes += len; in ks8842_update_rx_counters()
539 netdev->stats.multicast++; in ks8842_update_rx_counters()
545 struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; in __ks8842_start_new_rx_dma()
546 struct scatterlist *sg = &ctl->sg; in __ks8842_start_new_rx_dma()
549 ctl->skb = netdev_alloc_skb(netdev, DMA_BUFFER_SIZE); in __ks8842_start_new_rx_dma()
550 if (ctl->skb) { in __ks8842_start_new_rx_dma()
552 sg_dma_address(sg) = dma_map_single(adapter->dev, in __ks8842_start_new_rx_dma()
553 ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); in __ks8842_start_new_rx_dma()
554 if (dma_mapping_error(adapter->dev, sg_dma_address(sg))) { in __ks8842_start_new_rx_dma()
555 err = -ENOMEM; in __ks8842_start_new_rx_dma()
562 ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, in __ks8842_start_new_rx_dma()
565 if (!ctl->adesc) { in __ks8842_start_new_rx_dma()
566 err = -ENOMEM; in __ks8842_start_new_rx_dma()
570 ctl->adesc->callback_param = netdev; in __ks8842_start_new_rx_dma()
571 ctl->adesc->callback = ks8842_dma_rx_cb; in __ks8842_start_new_rx_dma()
572 ctl->adesc->tx_submit(ctl->adesc); in __ks8842_start_new_rx_dma()
574 err = -ENOMEM; in __ks8842_start_new_rx_dma()
582 dma_unmap_single(adapter->dev, sg_dma_address(sg), in __ks8842_start_new_rx_dma()
585 dev_kfree_skb(ctl->skb); in __ks8842_start_new_rx_dma()
586 ctl->skb = NULL; in __ks8842_start_new_rx_dma()
588 printk(KERN_ERR DRV_NAME": Failed to start RX DMA: %d\n", err); in __ks8842_start_new_rx_dma()
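Both the RX setup above and the TX path submit their descriptors by calling the descriptor's tx_submit() hook directly. The dmaengine API's usual spelling of the same step is dmaengine_submit(), optionally followed by dma_async_issue_pending(); the sketch below shows that equivalent, with the issue-pending kick included as a conservative assumption.

    /* Sketch only: equivalent submission using the dmaengine helpers. */
    static void example_submit_rx_desc(struct ks8842_rx_dma_ctl *ctl,
                                       struct net_device *netdev)
    {
            ctl->adesc->callback = ks8842_dma_rx_cb;
            ctl->adesc->callback_param = netdev;
            dmaengine_submit(ctl->adesc);           /* same as ctl->adesc->tx_submit(ctl->adesc) */
            dma_async_issue_pending(ctl->chan);     /* kick the channel if it is not auto-issuing */
    }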
595 struct net_device *netdev = adapter->netdev; in ks8842_rx_frame_dma_tasklet()
596 struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx; in ks8842_rx_frame_dma_tasklet()
597 struct sk_buff *skb = ctl->skb; in ks8842_rx_frame_dma_tasklet()
598 dma_addr_t addr = sg_dma_address(&ctl->sg); in ks8842_rx_frame_dma_tasklet()
601 ctl->adesc = NULL; in ks8842_rx_frame_dma_tasklet()
607 dma_unmap_single(adapter->dev, addr, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); in ks8842_rx_frame_dma_tasklet()
609 status = *((u32 *)skb->data); in ks8842_rx_frame_dma_tasklet()
611 netdev_dbg(netdev, "%s - rx_data: status: %x\n", in ks8842_rx_frame_dma_tasklet()
624 skb->protocol = eth_type_trans(skb, netdev); in ks8842_rx_frame_dma_tasklet()
638 if (adapter->conf_flags & KS884X_16BIT) { in ks8842_rx_frame()
641 netdev_dbg(netdev, "%s - rx_data: status: %x\n", in ks8842_rx_frame()
647 netdev_dbg(netdev, "%s - rx_data: status: %x\n", in ks8842_rx_frame()
659 if (adapter->conf_flags & KS884X_16BIT) { in ks8842_rx_frame()
663 *data16++ = ioread16(adapter->hw_addr + in ks8842_rx_frame()
665 *data16++ = ioread16(adapter->hw_addr + in ks8842_rx_frame()
667 len -= sizeof(u32); in ks8842_rx_frame()
674 *data++ = ioread32(adapter->hw_addr + in ks8842_rx_frame()
676 len -= sizeof(u32); in ks8842_rx_frame()
679 skb->protocol = eth_type_trans(skb, netdev); in ks8842_rx_frame()
682 netdev->stats.rx_dropped++; in ks8842_rx_frame()
686 /* set high watermark to 3K */ in ks8842_rx_frame()
692 /* set high watermark to 2K */ in ks8842_rx_frame()
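File lines 686 and 692 toggle the RX queue's high watermark between 3 KB and the 2 KB default that ks8842_reset_hw() programs (file line 309), presumably raising it while the receive path is under pressure and restoring it afterwards. A sketch of that idea using the driver's bit helpers is below; the bank number, bit position, register name (REG_QRFCR) and the (adapter, bank, bits, offset) argument order are all assumptions for illustration, not taken from the file.

    /* Sketch only: bank, bit and register below are assumed, not from ks8842.c. */
    static void example_set_rx_high_watermark(struct ks8842_adapter *adapter,
                                              bool pressure)
    {
            if (pressure)
                    /* assumed: a watermark-select bit meaning "3 KB" when set */
                    ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
            else
                    ks8842_clear_bits(adapter, 0, 1 << 12, REG_QRFCR);
    }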
700 netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data); in ks8842_handle_rx()
711 netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr); in ks8842_handle_tx()
712 netdev->stats.tx_packets++; in ks8842_handle_tx()
721 netdev->stats.rx_errors++; in ks8842_handle_rx_overrun()
722 netdev->stats.rx_fifo_errors++; in ks8842_handle_rx_overrun()
728 struct net_device *netdev = adapter->netdev; in ks8842_tasklet()
734 spin_lock_irqsave(&adapter->lock, flags); in ks8842_tasklet()
735 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); in ks8842_tasklet()
736 spin_unlock_irqrestore(&adapter->lock, flags); in ks8842_tasklet()
739 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); in ks8842_tasklet()
741 /* when running in DMA mode, do not ack RX interrupts, it is handled in ks8842_tasklet()
742 internally by timberdale */ in ks8842_tasklet()
750 if (!(adapter->conf_flags & MICREL_KS884X)) in ks8842_tasklet()
752 iowrite32(0x1, adapter->hw_addr + REG_TIMB_IAR); in ks8842_tasklet()
781 /* re-enable interrupts, put back the bank selection register */ in ks8842_tasklet()
782 spin_lock_irqsave(&adapter->lock, flags); in ks8842_tasklet()
787 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); in ks8842_tasklet()
794 spin_unlock_irqrestore(&adapter->lock, flags); in ks8842_tasklet()
802 u16 entry_bank = ioread16(adapter->hw_addr + REG_SELECT_BANK); in ks8842_irq()
806 netdev_dbg(netdev, "%s - ISR: 0x%x\n", __func__, isr); in ks8842_irq()
810 /* disable all but RX IRQ, since the FPGA relies on it */ in ks8842_irq()
817 tasklet_schedule(&adapter->tasklet); in ks8842_irq()
822 iowrite16(entry_bank, adapter->hw_addr + REG_SELECT_BANK); in ks8842_irq()
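ks8842_irq() and ks8842_tasklet() above follow the usual top-half/bottom-half split: the hard handler checks that this (shared) interrupt really came from the chip, masks everything except the RX IRQ the FPGA relies on, and schedules the tasklet, which does the real work and re-enables interrupts under the bank-select lock. A compressed sketch of the top half; the example_ name is not from the driver, and the bank 18 / REG_ISR read is an assumption.

    /*
     * Sketch only: with IRQF_SHARED (see ks8842_open()), the handler must
     * confirm its device raised the interrupt before claiming it.
     */
    static irqreturn_t example_hard_irq(int irq, void *devid)
    {
            struct net_device *netdev = devid;
            struct ks8842_adapter *adapter = netdev_priv(netdev);
            irqreturn_t ret = IRQ_NONE;
            u16 isr = ks8842_read16(adapter, 18, REG_ISR);  /* bank/offset assumed */

            if (isr) {
                    /* mask chip interrupts (all but RX on the FPGA), defer the rest */
                    tasklet_schedule(&adapter->tasklet);
                    ret = IRQ_HANDLED;
            }
            return ret;
    }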
837 netdev_dbg(netdev, "RX DMA finished\n"); in ks8842_dma_rx_cb()
839 if (adapter->dma_rx.adesc) in ks8842_dma_rx_cb()
840 tasklet_schedule(&adapter->dma_rx.tasklet); in ks8842_dma_rx_cb()
847 struct ks8842_tx_dma_ctl *ctl = &adapter->dma_tx; in ks8842_dma_tx_cb()
851 if (!ctl->adesc) in ks8842_dma_tx_cb()
854 netdev->stats.tx_packets++; in ks8842_dma_tx_cb()
855 ctl->adesc = NULL; in ks8842_dma_tx_cb()
863 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; in ks8842_stop_dma()
864 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; in ks8842_stop_dma()
866 tx_ctl->adesc = NULL; in ks8842_stop_dma()
867 if (tx_ctl->chan) in ks8842_stop_dma()
868 dmaengine_terminate_all(tx_ctl->chan); in ks8842_stop_dma()
870 rx_ctl->adesc = NULL; in ks8842_stop_dma()
871 if (rx_ctl->chan) in ks8842_stop_dma()
872 dmaengine_terminate_all(rx_ctl->chan); in ks8842_stop_dma()
874 if (sg_dma_address(&rx_ctl->sg)) in ks8842_stop_dma()
875 dma_unmap_single(adapter->dev, sg_dma_address(&rx_ctl->sg), in ks8842_stop_dma()
877 sg_dma_address(&rx_ctl->sg) = 0; in ks8842_stop_dma()
879 dev_kfree_skb(rx_ctl->skb); in ks8842_stop_dma()
880 rx_ctl->skb = NULL; in ks8842_stop_dma()
885 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; in ks8842_dealloc_dma_bufs()
886 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; in ks8842_dealloc_dma_bufs()
890 if (tx_ctl->chan) in ks8842_dealloc_dma_bufs()
891 dma_release_channel(tx_ctl->chan); in ks8842_dealloc_dma_bufs()
892 tx_ctl->chan = NULL; in ks8842_dealloc_dma_bufs()
894 if (rx_ctl->chan) in ks8842_dealloc_dma_bufs()
895 dma_release_channel(rx_ctl->chan); in ks8842_dealloc_dma_bufs()
896 rx_ctl->chan = NULL; in ks8842_dealloc_dma_bufs()
898 tasklet_kill(&rx_ctl->tasklet); in ks8842_dealloc_dma_bufs()
900 if (sg_dma_address(&tx_ctl->sg)) in ks8842_dealloc_dma_bufs()
901 dma_unmap_single(adapter->dev, sg_dma_address(&tx_ctl->sg), in ks8842_dealloc_dma_bufs()
903 sg_dma_address(&tx_ctl->sg) = 0; in ks8842_dealloc_dma_bufs()
905 kfree(tx_ctl->buf); in ks8842_dealloc_dma_bufs()
906 tx_ctl->buf = NULL; in ks8842_dealloc_dma_bufs()
911 return chan->chan_id == (long)filter_param; in ks8842_dma_filter_fn()
917 struct ks8842_tx_dma_ctl *tx_ctl = &adapter->dma_tx; in ks8842_alloc_dma_bufs()
918 struct ks8842_rx_dma_ctl *rx_ctl = &adapter->dma_rx; in ks8842_alloc_dma_bufs()
927 sg_init_table(&tx_ctl->sg, 1); in ks8842_alloc_dma_bufs()
929 tx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, in ks8842_alloc_dma_bufs()
930 (void *)(long)tx_ctl->channel); in ks8842_alloc_dma_bufs()
931 if (!tx_ctl->chan) { in ks8842_alloc_dma_bufs()
932 err = -ENODEV; in ks8842_alloc_dma_bufs()
937 tx_ctl->buf = kmalloc(DMA_BUFFER_SIZE, GFP_KERNEL); in ks8842_alloc_dma_bufs()
938 if (!tx_ctl->buf) { in ks8842_alloc_dma_bufs()
939 err = -ENOMEM; in ks8842_alloc_dma_bufs()
943 sg_dma_address(&tx_ctl->sg) = dma_map_single(adapter->dev, in ks8842_alloc_dma_bufs()
944 tx_ctl->buf, DMA_BUFFER_SIZE, DMA_TO_DEVICE); in ks8842_alloc_dma_bufs()
945 if (dma_mapping_error(adapter->dev, sg_dma_address(&tx_ctl->sg))) { in ks8842_alloc_dma_bufs()
946 err = -ENOMEM; in ks8842_alloc_dma_bufs()
947 sg_dma_address(&tx_ctl->sg) = 0; in ks8842_alloc_dma_bufs()
951 rx_ctl->chan = dma_request_channel(mask, ks8842_dma_filter_fn, in ks8842_alloc_dma_bufs()
952 (void *)(long)rx_ctl->channel); in ks8842_alloc_dma_bufs()
953 if (!rx_ctl->chan) { in ks8842_alloc_dma_bufs()
954 err = -ENODEV; in ks8842_alloc_dma_bufs()
958 tasklet_setup(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet); in ks8842_alloc_dma_bufs()
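ks8842_alloc_dma_bufs() requests its channels by number through a filter callback: dma_request_channel() offers each available channel to ks8842_dma_filter_fn(), which accepts only the chan_id passed in platform data. A minimal sketch of that idiom with generic names (not the driver's) follows.

    /* Sketch only: generic "pick channel N" request via a filter function. */
    static bool example_filter(struct dma_chan *chan, void *param)
    {
            return chan->chan_id == (long)param;
    }

    static struct dma_chan *example_request_channel(int channel)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);   /* a slave (peripheral) channel is needed */

            return dma_request_channel(mask, example_filter, (void *)(long)channel);
    }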
973 netdev_dbg(netdev, "%s - entry\n", __func__); in ks8842_open()
979 /* start RX dma */ in ks8842_open()
989 adapter->dma_rx.channel = -1; in ks8842_open()
990 adapter->dma_tx.channel = -1; in ks8842_open()
997 ks8842_write_mac_addr(adapter, netdev->dev_addr); in ks8842_open()
1001 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, in ks8842_open()
1004 pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err); in ks8842_open()
1015 netdev_dbg(netdev, "%s - entry\n", __func__); in ks8842_close()
1017 cancel_work_sync(&adapter->timeout_work); in ks8842_close()
1023 free_irq(adapter->irq, netdev); in ks8842_close()
1043 spin_lock_irqsave(&adapter->lock, flags); in ks8842_xmit_frame()
1044 if (adapter->dma_tx.adesc) in ks8842_xmit_frame()
1046 spin_unlock_irqrestore(&adapter->lock, flags); in ks8842_xmit_frame()
1052 if (ks8842_tx_fifo_space(adapter) < netdev->mtu + 8) in ks8842_xmit_frame()
1062 char *mac = (u8 *)addr->sa_data; in ks8842_set_mac()
1066 if (!is_valid_ether_addr(addr->sa_data)) in ks8842_set_mac()
1067 return -EADDRNOTAVAIL; in ks8842_set_mac()
1079 struct net_device *netdev = adapter->netdev; in ks8842_tx_timeout_work()
1084 spin_lock_irqsave(&adapter->lock, flags); in ks8842_tx_timeout_work()
1095 spin_unlock_irqrestore(&adapter->lock, flags); in ks8842_tx_timeout_work()
1099 ks8842_write_mac_addr(adapter, netdev->dev_addr); in ks8842_tx_timeout_work()
1113 schedule_work(&adapter->timeout_work); in ks8842_tx_timeout()
1131 int err = -ENOMEM; in ks8842_probe()
1135 struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev); in ks8842_probe()
1141 dev_err(&pdev->dev, "Invalid resource\n"); in ks8842_probe()
1142 return -EINVAL; in ks8842_probe()
1144 if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) in ks8842_probe()
1151 SET_NETDEV_DEV(netdev, &pdev->dev); in ks8842_probe()
1154 adapter->netdev = netdev; in ks8842_probe()
1155 INIT_WORK(&adapter->timeout_work, ks8842_tx_timeout_work); in ks8842_probe()
1156 adapter->hw_addr = ioremap(iomem->start, resource_size(iomem)); in ks8842_probe()
1157 adapter->conf_flags = iomem->flags; in ks8842_probe()
1159 if (!adapter->hw_addr) in ks8842_probe()
1162 adapter->irq = platform_get_irq(pdev, 0); in ks8842_probe()
1163 if (adapter->irq < 0) { in ks8842_probe()
1164 err = adapter->irq; in ks8842_probe()
1168 adapter->dev = (pdev->dev.parent) ? pdev->dev.parent : &pdev->dev; in ks8842_probe()
1171 if (!(adapter->conf_flags & MICREL_KS884X) && pdata && in ks8842_probe()
1172 (pdata->tx_dma_channel != -1) && in ks8842_probe()
1173 (pdata->rx_dma_channel != -1)) { in ks8842_probe()
1174 adapter->dma_rx.channel = pdata->rx_dma_channel; in ks8842_probe()
1175 adapter->dma_tx.channel = pdata->tx_dma_channel; in ks8842_probe()
1177 adapter->dma_rx.channel = -1; in ks8842_probe()
1178 adapter->dma_tx.channel = -1; in ks8842_probe()
1181 tasklet_setup(&adapter->tasklet, ks8842_tasklet); in ks8842_probe()
1182 spin_lock_init(&adapter->lock); in ks8842_probe()
1184 netdev->netdev_ops = &ks8842_netdev_ops; in ks8842_probe()
1185 netdev->ethtool_ops = &ks8842_ethtool_ops; in ks8842_probe()
1188 i = netdev->addr_len; in ks8842_probe()
1190 for (i = 0; i < netdev->addr_len; i++) in ks8842_probe()
1191 if (pdata->macaddr[i] != 0) in ks8842_probe()
1194 if (i < netdev->addr_len) in ks8842_probe()
1196 eth_hw_addr_set(netdev, pdata->macaddr); in ks8842_probe()
1199 if (i == netdev->addr_len) { in ks8842_probe()
1202 if (!is_valid_ether_addr(netdev->dev_addr)) in ks8842_probe()
1208 strcpy(netdev->name, "eth%d"); in ks8842_probe()
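The MAC address selection in ks8842_probe() (file lines 1188-1202) works through a fallback chain: use the platform-data address if any of its bytes is non-zero, otherwise keep whatever the chip already holds, and only then check validity. The sketch below compresses that chain; the final eth_hw_addr_random() step is an assumption suggested by the is_valid_ether_addr() check, not shown in the listing.

    /* Sketch only: compressed view of the MAC selection order in ks8842_probe(). */
    static void example_pick_mac(struct net_device *netdev,
                                 struct ks8842_platform_data *pdata)
    {
            if (pdata && !is_zero_ether_addr(pdata->macaddr)) {
                    eth_hw_addr_set(netdev, pdata->macaddr);        /* platform-provided */
            } else {
                    /* otherwise keep the address already programmed in the chip ... */
                    if (!is_valid_ether_addr(netdev->dev_addr))
                            eth_hw_addr_random(netdev);             /* assumed last resort */
            }
    }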
1222 iounmap(adapter->hw_addr); in ks8842_probe()
1226 release_mem_region(iomem->start, resource_size(iomem)); in ks8842_probe()
1238 tasklet_kill(&adapter->tasklet); in ks8842_remove()
1239 iounmap(adapter->hw_addr); in ks8842_remove()
1241 release_mem_region(iomem->start, resource_size(iomem)); in ks8842_remove()
1256 MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");