Lines matching +full:11 +full:mp in drivers/net/ethernet/marvell/mv643xx_eth.c (Linux kernel). Each result line carries its line number in the source file, the matched code, and the enclosing function; the trailing "argument"/"local" marks how mp enters that function.

256 #define TX_IHL_SHIFT			11
419 static inline u32 rdl(struct mv643xx_eth_private *mp, int offset) in rdl() argument
421 return readl(mp->shared->base + offset); in rdl()
424 static inline u32 rdlp(struct mv643xx_eth_private *mp, int offset) in rdlp() argument
426 return readl(mp->base + offset); in rdlp()
429 static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data) in wrl() argument
431 writel(data, mp->shared->base + offset); in wrl()
434 static inline void wrlp(struct mv643xx_eth_private *mp, int offset, u32 data) in wrlp() argument
436 writel(data, mp->base + offset); in wrlp()
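
The four accessors above differ only in which MMIO window they address: rdl()/wrl() use the controller-wide base (mp->shared->base), while rdlp()/wrlp() use the per-port bank (mp->base). A minimal user-space sketch of that split, with plain arrays standing in for the iomem regions (readl()/writel() and the offsets here are stand-ins, not the kernel API):

#include <stdint.h>
#include <stdio.h>

static uint32_t shared_regs[64];    /* controller-wide registers */
static uint32_t port_regs[64];      /* per-port register bank */

struct eth_priv {
    uint32_t *shared_base;          /* analogous to mp->shared->base */
    uint32_t *base;                 /* analogous to mp->base */
};

/* Controller-wide access, like rdl()/wrl(). */
static inline uint32_t rdl(struct eth_priv *mp, int offset)
{
    return mp->shared_base[offset / 4];
}

static inline void wrl(struct eth_priv *mp, int offset, uint32_t data)
{
    mp->shared_base[offset / 4] = data;
}

/* Per-port access, like rdlp()/wrlp(). */
static inline uint32_t rdlp(struct eth_priv *mp, int offset)
{
    return mp->base[offset / 4];
}

static inline void wrlp(struct eth_priv *mp, int offset, uint32_t data)
{
    mp->base[offset / 4] = data;
}

int main(void)
{
    struct eth_priv mp = { shared_regs, port_regs };

    wrl(&mp, 0x10, 0xdeadbeef);     /* lands in the shared window */
    wrlp(&mp, 0x10, 0xcafef00d);    /* lands in the port window */
    printf("%08x %08x\n", rdl(&mp, 0x10), rdlp(&mp, 0x10));
    return 0;
}
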
453 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_enable() local
454 wrlp(mp, RXQ_COMMAND, 1 << rxq->index); in rxq_enable()
459 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_disable() local
462 wrlp(mp, RXQ_COMMAND, mask << 8); in rxq_disable()
463 while (rdlp(mp, RXQ_COMMAND) & mask) in rxq_disable()
469 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reset_hw_ptr() local
474 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr); in txq_reset_hw_ptr()
479 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_enable() local
480 wrlp(mp, TXQ_COMMAND, 1 << txq->index); in txq_enable()
485 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_disable() local
488 wrlp(mp, TXQ_COMMAND, mask << 8); in txq_disable()
489 while (rdlp(mp, TXQ_COMMAND) & mask) in txq_disable()
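
The rxq and txq enable/disable pairs share one command-register convention: the low byte of RXQ_COMMAND/TXQ_COMMAND holds per-queue enable bits (write 1 << index to start a queue), and writing the same mask shifted left by 8 requests a disable, after which the driver polls the low byte until hardware clears it. A self-contained sketch of that protocol against a simulated register; the instant-completion hardware model below is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t rxq_command;    /* simulated RXQ_COMMAND register */

/* Simulated hardware: a disable request (bits 8..15) clears the
 * matching enable bits (bits 0..7) at once. Real hardware may take
 * time to drain the queue, which is why the driver busy-waits. */
static void hw_step(void)
{
    uint32_t disable = (rxq_command >> 8) & 0xff;

    rxq_command &= ~disable;            /* drop the enable bits */
    rxq_command &= ~(disable << 8);     /* ack: clear the request */
}

static void rxq_enable(int index)
{
    rxq_command |= 1u << index;
}

static void rxq_disable(int index)
{
    uint32_t mask = 1u << index;

    rxq_command |= mask << 8;
    while (rxq_command & mask)          /* poll like the driver does */
        hw_step();
}

int main(void)
{
    rxq_enable(0);
    rxq_enable(3);
    printf("enabled: %02x\n", rxq_command & 0xff);  /* 09 */
    rxq_disable(0);
    printf("after:   %02x\n", rxq_command & 0xff);  /* 08 */
    return 0;
}
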
495 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_maybe_wake() local
496 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_maybe_wake()
508 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_process() local
509 struct net_device_stats *stats = &mp->dev->stats; in rxq_process()
533 dma_unmap_single(mp->dev->dev.parent, rx_desc->buf_ptr, in rxq_process()
538 mp->work_rx_refill |= 1 << rxq->index; in rxq_process()
570 skb->protocol = eth_type_trans(skb, mp->dev); in rxq_process()
572 napi_gro_receive(&mp->napi, skb); in rxq_process()
582 netdev_err(mp->dev, in rxq_process()
593 mp->work_rx &= ~(1 << rxq->index); in rxq_process()
600 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_refill() local
610 skb = netdev_alloc_skb(mp->dev, mp->skb_size); in rxq_refill()
613 mp->oom = 1; in rxq_refill()
630 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, in rxq_refill()
648 mp->work_rx_refill &= ~(1 << rxq->index); in rxq_refill()
670 static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb, in skb_tx_csum() argument
686 if (length - hdr_len > mp->shared->tx_csum_limit || in skb_tx_csum()
778 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_put_hdr_tso() local
790 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length); in txq_put_hdr_tso()
821 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_tso() local
877 mp->work_tx_end &= ~(1 << txq->index); in txq_submit_tso()
893 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_frag_skb() local
923 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, in txq_submit_frag_skb()
932 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_submit_skb() local
949 ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len); in txq_submit_skb()
970 desc->buf_ptr = dma_map_single(mp->dev->dev.parent, skb->data, in txq_submit_skb()
982 mp->work_tx_end &= ~(1 << txq->index); in txq_submit_skb()
995 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_xmit() local
1001 txq = mp->txq + queue; in mv643xx_eth_xmit()
1034 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_kick() local
1035 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_kick()
1041 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index)) in txq_kick()
1044 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index)); in txq_kick()
1054 mp->work_tx_end &= ~(1 << txq->index); in txq_kick()
1059 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_reclaim() local
1060 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_reclaim()
1094 dma_unmap_page(mp->dev->dev.parent, in txq_reclaim()
1099 dma_unmap_single(mp->dev->dev.parent, in txq_reclaim()
1113 netdev_info(mp->dev, "tx error\n"); in txq_reclaim()
1114 mp->dev->stats.tx_errors++; in txq_reclaim()
1122 mp->work_tx &= ~(1 << txq->index); in txq_reclaim()
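
Throughout the driver, pending work is tracked as one bit per queue in fields like work_rx, work_rx_refill, work_tx and work_tx_end: handlers set their bit when work appears and clear it (as in the line above) once a queue is drained. A small sketch of that bookkeeping, using ffs() to pick the lowest pending queue the way a dispatch loop might; the helper names are illustrative, not the driver's:

#include <stdio.h>
#include <strings.h>    /* ffs() */

static unsigned int work_rx;    /* one bit per RX queue */

static void mark_pending(int queue)
{
    work_rx |= 1u << queue;
}

static void queue_drained(int queue)
{
    work_rx &= ~(1u << queue);  /* mirrors mp->work_rx &= ~(1 << rxq->index) */
}

int main(void)
{
    mark_pending(2);
    mark_pending(5);

    while (work_rx) {
        int queue = ffs(work_rx) - 1;   /* lowest pending queue */

        printf("processing rx queue %d\n", queue);
        queue_drained(queue);
    }
    return 0;
}
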
1133 static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst) in tx_set_rate() argument
1139 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); in tx_set_rate()
1143 mtu = (mp->dev->mtu + 255) >> 8; in tx_set_rate()
1151 switch (mp->shared->tx_bw_control) { in tx_set_rate()
1153 wrlp(mp, TX_BW_RATE, token_rate); in tx_set_rate()
1154 wrlp(mp, TX_BW_MTU, mtu); in tx_set_rate()
1155 wrlp(mp, TX_BW_BURST, bucket_size); in tx_set_rate()
1158 wrlp(mp, TX_BW_RATE_MOVED, token_rate); in tx_set_rate()
1159 wrlp(mp, TX_BW_MTU_MOVED, mtu); in tx_set_rate()
1160 wrlp(mp, TX_BW_BURST_MOVED, bucket_size); in tx_set_rate()
1167 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_set_rate() local
1171 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000); in txq_set_rate()
1179 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14); in txq_set_rate()
1180 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate); in txq_set_rate()
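
tx_set_rate() and txq_set_rate() both derive a token-bucket refill value from the requested rate (apparently bits per second, given the 1000000000 passed from port_start()) and the SoC clock: token_rate = ((rate / 1000) * 64) / (t_clk / 1000), with the MTU expressed in 256-byte units. The shifts in txq_set_rate() (token_rate << 14, (bucket_size << 10) | token_rate) imply a 10-bit token-rate field, so the clamp to 1023 below is an assumption, as are the bucket scaling and its clamp, which the matches above elide. A worked example with the driver's own defaults:

#include <stdio.h>

int main(void)
{
    unsigned int rate = 1000000000;     /* 1 Gb/s, as in tx_set_rate(mp, 1000000000, ...) */
    unsigned int burst = 16777216;      /* 16 MiB burst, the other default argument */
    unsigned int t_clk = 133000000;     /* 133 MHz fallback t_clk from the probe path */
    unsigned int mtu_bytes = 1500;

    /* Same arithmetic as tx_set_rate(). */
    unsigned int token_rate = ((rate / 1000) * 64) / (t_clk / 1000);
    unsigned int mtu = (mtu_bytes + 255) >> 8;      /* MTU in 256-byte units */
    unsigned int bucket_size = (burst + 255) >> 8;  /* assumption: same 256-byte scaling */

    if (token_rate > 1023)      /* assumption: clamp to the 10-bit field */
        token_rate = 1023;
    if (bucket_size > 65535)    /* assumption: clamp to a 16-bit field */
        bucket_size = 65535;

    printf("token_rate = %u\n", token_rate);    /* 481 for these inputs */
    printf("mtu field  = %u\n", mtu);           /* 6 */
    printf("bucket     = %u\n", bucket_size);   /* 65535 after the clamp */
    return 0;
}
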
1185 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_set_fixed_prio_mode() local
1193 switch (mp->shared->tx_bw_control) { in txq_set_fixed_prio_mode()
1203 val = rdlp(mp, off); in txq_set_fixed_prio_mode()
1205 wrlp(mp, off, val); in txq_set_fixed_prio_mode()
1213 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_adjust_link() local
1214 u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL); in mv643xx_eth_adjust_link()
1248 wrlp(mp, PORT_SERIAL_CONTROL, pscr); in mv643xx_eth_adjust_link()
1254 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_stats() local
1261 for (i = 0; i < mp->txq_count; i++) { in mv643xx_eth_get_stats()
1262 struct tx_queue *txq = mp->txq + i; in mv643xx_eth_get_stats()
1276 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset) in mib_read() argument
1278 return rdl(mp, MIB_COUNTERS(mp->port_num) + offset); in mib_read()
1281 static void mib_counters_clear(struct mv643xx_eth_private *mp) in mib_counters_clear() argument
1286 mib_read(mp, i); in mib_counters_clear()
1289 rdlp(mp, RX_DISCARD_FRAME_CNT); in mib_counters_clear()
1290 rdlp(mp, RX_OVERRUN_FRAME_CNT); in mib_counters_clear()
1293 static void mib_counters_update(struct mv643xx_eth_private *mp) in mib_counters_update() argument
1295 struct mib_counters *p = &mp->mib_counters; in mib_counters_update()
1297 spin_lock_bh(&mp->mib_counters_lock); in mib_counters_update()
1298 p->good_octets_received += mib_read(mp, 0x00); in mib_counters_update()
1299 p->bad_octets_received += mib_read(mp, 0x08); in mib_counters_update()
1300 p->internal_mac_transmit_err += mib_read(mp, 0x0c); in mib_counters_update()
1301 p->good_frames_received += mib_read(mp, 0x10); in mib_counters_update()
1302 p->bad_frames_received += mib_read(mp, 0x14); in mib_counters_update()
1303 p->broadcast_frames_received += mib_read(mp, 0x18); in mib_counters_update()
1304 p->multicast_frames_received += mib_read(mp, 0x1c); in mib_counters_update()
1305 p->frames_64_octets += mib_read(mp, 0x20); in mib_counters_update()
1306 p->frames_65_to_127_octets += mib_read(mp, 0x24); in mib_counters_update()
1307 p->frames_128_to_255_octets += mib_read(mp, 0x28); in mib_counters_update()
1308 p->frames_256_to_511_octets += mib_read(mp, 0x2c); in mib_counters_update()
1309 p->frames_512_to_1023_octets += mib_read(mp, 0x30); in mib_counters_update()
1310 p->frames_1024_to_max_octets += mib_read(mp, 0x34); in mib_counters_update()
1311 p->good_octets_sent += mib_read(mp, 0x38); in mib_counters_update()
1312 p->good_frames_sent += mib_read(mp, 0x40); in mib_counters_update()
1313 p->excessive_collision += mib_read(mp, 0x44); in mib_counters_update()
1314 p->multicast_frames_sent += mib_read(mp, 0x48); in mib_counters_update()
1315 p->broadcast_frames_sent += mib_read(mp, 0x4c); in mib_counters_update()
1316 p->unrec_mac_control_received += mib_read(mp, 0x50); in mib_counters_update()
1317 p->fc_sent += mib_read(mp, 0x54); in mib_counters_update()
1318 p->good_fc_received += mib_read(mp, 0x58); in mib_counters_update()
1319 p->bad_fc_received += mib_read(mp, 0x5c); in mib_counters_update()
1320 p->undersize_received += mib_read(mp, 0x60); in mib_counters_update()
1321 p->fragments_received += mib_read(mp, 0x64); in mib_counters_update()
1322 p->oversize_received += mib_read(mp, 0x68); in mib_counters_update()
1323 p->jabber_received += mib_read(mp, 0x6c); in mib_counters_update()
1324 p->mac_receive_error += mib_read(mp, 0x70); in mib_counters_update()
1325 p->bad_crc_event += mib_read(mp, 0x74); in mib_counters_update()
1326 p->collision += mib_read(mp, 0x78); in mib_counters_update()
1327 p->late_collision += mib_read(mp, 0x7c); in mib_counters_update()
1329 p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); in mib_counters_update()
1330 p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); in mib_counters_update()
1331 spin_unlock_bh(&mp->mib_counters_lock); in mib_counters_update()
1336 struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer); in mib_counters_timer_wrapper() local
1337 mib_counters_update(mp); in mib_counters_timer_wrapper()
1338 mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); in mib_counters_timer_wrapper()
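
The MIB block is read-to-clear: mib_counters_clear() zeroes the hardware counters simply by reading them (plus the two RX drop registers), and mib_counters_update() folds each hardware value into a software struct under mib_counters_lock, re-armed every 30 seconds so the 32-bit hardware counters cannot wrap unnoticed. A compressed sketch of that accumulate-on-read pattern; the hardware array is simulated and the accumulator widths are simplified to 64-bit (the real struct mib_counters mixes widths):

#include <stdint.h>
#include <stdio.h>

#define NCOUNTERS 32

static uint32_t hw_mib[NCOUNTERS];  /* simulated read-to-clear hardware counters */
static uint64_t sw_mib[NCOUNTERS];  /* software accumulators */

/* Like mib_read(): reading a MIB counter clears it in hardware. */
static uint32_t mib_read(int i)
{
    uint32_t v = hw_mib[i];

    hw_mib[i] = 0;
    return v;
}

/* Like mib_counters_update(): fold current hardware values into the
 * software totals. The real driver does this under
 * mp->mib_counters_lock, from a timer re-armed every 30 s. */
static void mib_counters_update(void)
{
    for (int i = 0; i < NCOUNTERS; i++)
        sw_mib[i] += mib_read(i);
}

int main(void)
{
    hw_mib[0] = 1000;           /* e.g. good_octets_received */
    mib_counters_update();
    hw_mib[0] = 500;            /* more traffic since the last poll */
    mib_counters_update();
    printf("total: %llu\n", (unsigned long long)sw_mib[0]);  /* 1500 */
    return 0;
}
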
1354 static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) in get_rx_coal() argument
1356 u32 val = rdlp(mp, SDMA_CONFIG); in get_rx_coal()
1359 if (mp->shared->extended_rx_coal_limit) in get_rx_coal()
1365 temp += mp->t_clk / 2; in get_rx_coal()
1366 do_div(temp, mp->t_clk); in get_rx_coal()
1371 static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec) in set_rx_coal() argument
1376 temp = (u64)usec * mp->t_clk; in set_rx_coal()
1380 val = rdlp(mp, SDMA_CONFIG); in set_rx_coal()
1381 if (mp->shared->extended_rx_coal_limit) { in set_rx_coal()
1393 wrlp(mp, SDMA_CONFIG, val); in set_rx_coal()
1396 static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) in get_tx_coal() argument
1400 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; in get_tx_coal()
1402 temp += mp->t_clk / 2; in get_tx_coal()
1403 do_div(temp, mp->t_clk); in get_tx_coal()
1408 static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec) in set_tx_coal() argument
1412 temp = (u64)usec * mp->t_clk; in set_tx_coal()
1419 wrlp(mp, TX_FIFO_URGENT_THRESHOLD, temp << 4); in set_tx_coal()
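
Both coalescing getters convert a hardware tick count back to microseconds with round-to-nearest integer division: add t_clk / 2 before dividing by t_clk (do_div() is the kernel's 64-by-32 divide; plain 64-bit arithmetic stands in below). The tick scaling factor is elided in the matches above, so this sketch shows only the rounding idiom:

#include <stdint.h>
#include <stdio.h>

/* Round-to-nearest division, as in get_rx_coal()/get_tx_coal():
 * temp += mp->t_clk / 2;  do_div(temp, mp->t_clk); */
static unsigned int div_round_nearest(uint64_t temp, uint32_t t_clk)
{
    temp += t_clk / 2;
    return (unsigned int)(temp / t_clk);
}

int main(void)
{
    uint32_t t_clk = 133000000;     /* 133 MHz, the probe-path fallback */

    /* 199500000 / 133000000 = 1.5, so it rounds up to 2 ... */
    printf("%u\n", div_round_nearest(199500000ull, t_clk));
    /* ... where truncating division would have given 1. */
    printf("%u\n", (unsigned int)(199500000ull / t_clk));
    return 0;
}
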
1483 mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp, in mv643xx_eth_get_link_ksettings_phy() argument
1486 struct net_device *dev = mp->dev; in mv643xx_eth_get_link_ksettings_phy()
1502 mv643xx_eth_get_link_ksettings_phyless(struct mv643xx_eth_private *mp, in mv643xx_eth_get_link_ksettings_phyless() argument
1508 port_status = rdlp(mp, PORT_STATUS); in mv643xx_eth_get_link_ksettings_phyless()
1570 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_link_ksettings() local
1573 return mv643xx_eth_get_link_ksettings_phy(mp, cmd); in mv643xx_eth_get_link_ksettings()
1575 return mv643xx_eth_get_link_ksettings_phyless(mp, cmd); in mv643xx_eth_get_link_ksettings()
1620 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_coalesce() local
1622 ec->rx_coalesce_usecs = get_rx_coal(mp); in mv643xx_eth_get_coalesce()
1623 ec->tx_coalesce_usecs = get_tx_coal(mp); in mv643xx_eth_get_coalesce()
1633 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_set_coalesce() local
1635 set_rx_coal(mp, ec->rx_coalesce_usecs); in mv643xx_eth_set_coalesce()
1636 set_tx_coal(mp, ec->tx_coalesce_usecs); in mv643xx_eth_set_coalesce()
1646 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_ringparam() local
1651 er->rx_pending = mp->rx_ring_size; in mv643xx_eth_get_ringparam()
1652 er->tx_pending = mp->tx_ring_size; in mv643xx_eth_get_ringparam()
1660 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_set_ringparam() local
1665 mp->rx_ring_size = min(er->rx_pending, 4096U); in mv643xx_eth_set_ringparam()
1666 mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending, in mv643xx_eth_set_ringparam()
1668 if (mp->tx_ring_size != er->tx_pending) in mv643xx_eth_set_ringparam()
1670 mp->tx_ring_size, er->tx_pending); in mv643xx_eth_set_ringparam()
1688 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_set_features() local
1691 wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000); in mv643xx_eth_set_features()
1710 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_get_ethtool_stats() local
1714 mib_counters_update(mp); in mv643xx_eth_get_ethtool_stats()
1723 p = ((void *)mp->dev) + stat->netdev_off; in mv643xx_eth_get_ethtool_stats()
1725 p = ((void *)mp) + stat->mp_off; in mv643xx_eth_get_ethtool_stats()
1761 static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr) in uc_addr_get() argument
1763 unsigned int mac_h = rdlp(mp, MAC_ADDR_HIGH); in uc_addr_get()
1764 unsigned int mac_l = rdlp(mp, MAC_ADDR_LOW); in uc_addr_get()
1774 static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr) in uc_addr_set() argument
1776 wrlp(mp, MAC_ADDR_HIGH, in uc_addr_set()
1778 wrlp(mp, MAC_ADDR_LOW, (addr[4] << 8) | addr[5]); in uc_addr_set()
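
uc_addr_set()/uc_addr_get() split the station MAC across two registers: the first four bytes in MAC_ADDR_HIGH and the last two in MAC_ADDR_LOW, as the LOW packing above shows. The HIGH packing is elided in the match; the big-endian byte order below is its natural completion, but treat the exact layout as an assumption. A round-trip sketch against simulated registers:

#include <stdint.h>
#include <stdio.h>

static uint32_t mac_addr_high, mac_addr_low;    /* simulated registers */

/* Pack a MAC into the two registers, like uc_addr_set(). */
static void uc_addr_set(const uint8_t *addr)
{
    mac_addr_high = ((uint32_t)addr[0] << 24) | ((uint32_t)addr[1] << 16) |
                    ((uint32_t)addr[2] << 8)  |  (uint32_t)addr[3];
    mac_addr_low  = ((uint32_t)addr[4] << 8)  |  (uint32_t)addr[5];
}

/* Inverse, like uc_addr_get(). */
static void uc_addr_get(uint8_t *addr)
{
    addr[0] = mac_addr_high >> 24;
    addr[1] = mac_addr_high >> 16;
    addr[2] = mac_addr_high >> 8;
    addr[3] = mac_addr_high;
    addr[4] = mac_addr_low >> 8;
    addr[5] = mac_addr_low;
}

int main(void)
{
    const uint8_t in[6] = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };
    uint8_t out[6];

    uc_addr_set(in);
    uc_addr_get(out);
    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           out[0], out[1], out[2], out[3], out[4], out[5]);
    return 0;
}
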
1804 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_program_unicast_filter() local
1809 uc_addr_set(mp, dev->dev_addr); in mv643xx_eth_program_unicast_filter()
1811 port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE; in mv643xx_eth_program_unicast_filter()
1820 int off = UNICAST_TABLE(mp->port_num) + i; in mv643xx_eth_program_unicast_filter()
1834 wrl(mp, off, v); in mv643xx_eth_program_unicast_filter()
1837 wrlp(mp, PORT_CONFIG, port_config); in mv643xx_eth_program_unicast_filter()
1860 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_program_multicast_filter() local
1892 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32), in mv643xx_eth_program_multicast_filter()
1894 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32), in mv643xx_eth_program_multicast_filter()
1903 wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32), in mv643xx_eth_program_multicast_filter()
1905 wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32), in mv643xx_eth_program_multicast_filter()
1934 static int rxq_init(struct mv643xx_eth_private *mp, int index) in rxq_init() argument
1936 struct rx_queue *rxq = mp->rxq + index; in rxq_init()
1943 rxq->rx_ring_size = mp->rx_ring_size; in rxq_init()
1951 if (index == 0 && size <= mp->rx_desc_sram_size) { in rxq_init()
1952 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, in rxq_init()
1953 mp->rx_desc_sram_size); in rxq_init()
1954 rxq->rx_desc_dma = mp->rx_desc_sram_addr; in rxq_init()
1956 rxq->rx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, in rxq_init()
1962 netdev_err(mp->dev, in rxq_init()
1990 if (index == 0 && size <= mp->rx_desc_sram_size) in rxq_init()
1993 dma_free_coherent(mp->dev->dev.parent, size, in rxq_init()
2003 struct mv643xx_eth_private *mp = rxq_to_mp(rxq); in rxq_deinit() local
2016 netdev_err(mp->dev, "error freeing rx ring -- %d skbs stuck\n", in rxq_deinit()
2021 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) in rxq_deinit()
2024 dma_free_coherent(mp->dev->dev.parent, rxq->rx_desc_area_size, in rxq_deinit()
2030 static int txq_init(struct mv643xx_eth_private *mp, int index) in txq_init() argument
2032 struct tx_queue *txq = mp->txq + index; in txq_init()
2040 txq->tx_ring_size = mp->tx_ring_size; in txq_init()
2055 if (index == 0 && size <= mp->tx_desc_sram_size) { in txq_init()
2056 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, in txq_init()
2057 mp->tx_desc_sram_size); in txq_init()
2058 txq->tx_desc_dma = mp->tx_desc_sram_addr; in txq_init()
2060 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent, in txq_init()
2066 netdev_err(mp->dev, in txq_init()
2096 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent, in txq_init()
2110 if (index == 0 && size <= mp->tx_desc_sram_size) in txq_init()
2113 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, in txq_init()
2120 struct mv643xx_eth_private *mp = txq_to_mp(txq); in txq_deinit() local
2128 txq->tx_desc_area_size <= mp->tx_desc_sram_size) in txq_deinit()
2131 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size, in txq_deinit()
2136 dma_free_coherent(mp->dev->dev.parent, in txq_deinit()
2143 static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp) in mv643xx_eth_collect_events() argument
2148 int_cause = rdlp(mp, INT_CAUSE) & mp->int_mask; in mv643xx_eth_collect_events()
2155 int_cause_ext = rdlp(mp, INT_CAUSE_EXT); in mv643xx_eth_collect_events()
2159 wrlp(mp, INT_CAUSE, ~int_cause); in mv643xx_eth_collect_events()
2160 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) & in mv643xx_eth_collect_events()
2161 ~(rdlp(mp, TXQ_COMMAND) & 0xff); in mv643xx_eth_collect_events()
2162 mp->work_rx |= (int_cause & INT_RX) >> 2; in mv643xx_eth_collect_events()
2167 wrlp(mp, INT_CAUSE_EXT, ~int_cause_ext); in mv643xx_eth_collect_events()
2169 mp->work_link = 1; in mv643xx_eth_collect_events()
2170 mp->work_tx |= int_cause_ext & INT_EXT_TX; in mv643xx_eth_collect_events()
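
mv643xx_eth_collect_events() reads INT_CAUSE masked by the enabled interrupts, acks by writing back the complement, and translates cause bits into per-queue work bits; notably, a queue only collects a tx-end event if it is no longer running, hence the & ~(rdlp(mp, TXQ_COMMAND) & 0xff) term. The shifts shown (>> 2 for RX, >> 19 for TX end) fix the field positions, but the INT_RX/INT_TX_END constants below are assumed values consistent with those shifts, not taken from the source:

#include <stdint.h>
#include <stdio.h>

/* Assumed bit layouts, consistent with the shifts in
 * mv643xx_eth_collect_events(): RX cause at bits 2..9,
 * TX-end cause at bits 19..26. */
#define INT_RX      0x000003fc
#define INT_TX_END  0x07f80000

static unsigned int work_rx, work_tx_end;

static void collect_events(uint32_t int_cause, uint32_t txq_running)
{
    /* Only queues that have stopped get a tx-end work bit. */
    work_tx_end |= ((int_cause & INT_TX_END) >> 19) & ~(txq_running & 0xff);
    work_rx     |= (int_cause & INT_RX) >> 2;
}

int main(void)
{
    /* RX event on queue 1; TX-end on queues 0 and 2; queue 2 still running. */
    uint32_t cause = (1u << 3) | (1u << 19) | (1u << 21);

    collect_events(cause, 1u << 2);
    printf("work_rx     = %02x\n", work_rx);        /* 02 */
    printf("work_tx_end = %02x\n", work_tx_end);    /* 01 */
    return 0;
}
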
2179 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_irq() local
2181 if (unlikely(!mv643xx_eth_collect_events(mp))) in mv643xx_eth_irq()
2184 wrlp(mp, INT_MASK, 0); in mv643xx_eth_irq()
2185 napi_schedule(&mp->napi); in mv643xx_eth_irq()
2190 static void handle_link_event(struct mv643xx_eth_private *mp) in handle_link_event() argument
2192 struct net_device *dev = mp->dev; in handle_link_event()
2198 port_status = rdlp(mp, PORT_STATUS); in handle_link_event()
2207 for (i = 0; i < mp->txq_count; i++) { in handle_link_event()
2208 struct tx_queue *txq = mp->txq + i; in handle_link_event()
2243 struct mv643xx_eth_private *mp; in mv643xx_eth_poll() local
2246 mp = container_of(napi, struct mv643xx_eth_private, napi); in mv643xx_eth_poll()
2248 if (unlikely(mp->oom)) { in mv643xx_eth_poll()
2249 mp->oom = 0; in mv643xx_eth_poll()
2250 del_timer(&mp->rx_oom); in mv643xx_eth_poll()
2259 if (mp->work_link) { in mv643xx_eth_poll()
2260 mp->work_link = 0; in mv643xx_eth_poll()
2261 handle_link_event(mp); in mv643xx_eth_poll()
2266 queue_mask = mp->work_tx | mp->work_tx_end | mp->work_rx; in mv643xx_eth_poll()
2267 if (likely(!mp->oom)) in mv643xx_eth_poll()
2268 queue_mask |= mp->work_rx_refill; in mv643xx_eth_poll()
2271 if (mv643xx_eth_collect_events(mp)) in mv643xx_eth_poll()
2283 if (mp->work_tx_end & queue_mask) { in mv643xx_eth_poll()
2284 txq_kick(mp->txq + queue); in mv643xx_eth_poll()
2285 } else if (mp->work_tx & queue_mask) { in mv643xx_eth_poll()
2286 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0); in mv643xx_eth_poll()
2287 txq_maybe_wake(mp->txq + queue); in mv643xx_eth_poll()
2288 } else if (mp->work_rx & queue_mask) { in mv643xx_eth_poll()
2289 work_done += rxq_process(mp->rxq + queue, work_tbd); in mv643xx_eth_poll()
2290 } else if (!mp->oom && (mp->work_rx_refill & queue_mask)) { in mv643xx_eth_poll()
2291 work_done += rxq_refill(mp->rxq + queue, work_tbd); in mv643xx_eth_poll()
2298 if (mp->oom) in mv643xx_eth_poll()
2299 mod_timer(&mp->rx_oom, jiffies + (HZ / 10)); in mv643xx_eth_poll()
2301 wrlp(mp, INT_MASK, mp->int_mask); in mv643xx_eth_poll()
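
The poll loop above drains work in strict priority order per queue: tx-end kick first, then tx reclaim, then rx processing, then rx refill (refill only when not in the OOM state), re-collecting events until the NAPI budget is spent and finally re-enabling interrupts. A condensed sketch of that dispatch ladder; the driver's budget slicing and bit-scan queue selection are simplified here to a single fixed queue:

#include <stdio.h>

/* Per-queue pending-work bits, as in struct mv643xx_eth_private. */
static unsigned int work_tx_end, work_tx, work_rx, work_rx_refill;
static int oom;

/* One dispatch step for one queue, in the same priority order as
 * mv643xx_eth_poll(); each step simulates one unit of work. */
static int poll_one(int queue)
{
    unsigned int queue_mask = 1u << queue;

    if (work_tx_end & queue_mask) {
        work_tx_end &= ~queue_mask;     /* txq_kick() */
        printf("kick txq %d\n", queue);
    } else if (work_tx & queue_mask) {
        work_tx &= ~queue_mask;         /* txq_reclaim() + txq_maybe_wake() */
        printf("reclaim txq %d\n", queue);
    } else if (work_rx & queue_mask) {
        work_rx &= ~queue_mask;         /* rxq_process() */
        printf("process rxq %d\n", queue);
    } else if (!oom && (work_rx_refill & queue_mask)) {
        work_rx_refill &= ~queue_mask;  /* rxq_refill() */
        printf("refill rxq %d\n", queue);
    } else {
        return 0;
    }
    return 1;
}

int main(void)
{
    int budget = 64, work_done = 0;

    work_tx = work_rx = work_rx_refill = 1;     /* all pending on queue 0 */
    while (work_done < budget && poll_one(0))
        work_done++;
    printf("work done: %d\n", work_done);       /* 3 */
    return 0;
}
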
2309 struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom); in oom_timer_wrapper() local
2311 napi_schedule(&mp->napi); in oom_timer_wrapper()
2314 static void port_start(struct mv643xx_eth_private *mp) in port_start() argument
2316 struct net_device *dev = mp->dev; in port_start()
2336 pscr = rdlp(mp, PORT_SERIAL_CONTROL); in port_start()
2339 wrlp(mp, PORT_SERIAL_CONTROL, pscr); in port_start()
2344 wrlp(mp, PORT_SERIAL_CONTROL, pscr); in port_start()
2349 tx_set_rate(mp, 1000000000, 16777216); in port_start()
2350 for (i = 0; i < mp->txq_count; i++) { in port_start()
2351 struct tx_queue *txq = mp->txq + i; in port_start()
2363 mv643xx_eth_set_features(mp->dev, mp->dev->features); in port_start()
2368 wrlp(mp, PORT_CONFIG_EXT, 0x00000000); in port_start()
2373 mv643xx_eth_program_unicast_filter(mp->dev); in port_start()
2378 for (i = 0; i < mp->rxq_count; i++) { in port_start()
2379 struct rx_queue *rxq = mp->rxq + i; in port_start()
2384 wrlp(mp, RXQ_CURRENT_DESC_PTR(i), addr); in port_start()
2390 static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp) in mv643xx_eth_recalc_skb_size() argument
2400 skb_size = mp->dev->mtu + 36; in mv643xx_eth_recalc_skb_size()
2407 mp->skb_size = (skb_size + 7) & ~7; in mv643xx_eth_recalc_skb_size()
2415 mp->skb_size += SKB_DMA_REALIGN; in mv643xx_eth_recalc_skb_size()
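
mv643xx_eth_recalc_skb_size() pads the MTU by 36 bytes of header and FCS overhead, rounds the result up to a multiple of 8 (the receive path wants 8-byte-aligned buffer sizes), then adds SKB_DMA_REALIGN slack on top. The rounding idiom, worked through:

#include <stdio.h>

int main(void)
{
    unsigned int mtu = 1500;
    unsigned int skb_size = mtu + 36;       /* 1536 */

    /* Round up to the next multiple of 8, as in
     * mp->skb_size = (skb_size + 7) & ~7;  1536 is already aligned. */
    skb_size = (skb_size + 7) & ~7u;
    printf("mtu 1500 -> %u\n", skb_size);   /* 1536 */

    /* An odd MTU shows the rounding at work: 1499 + 36 = 1535 -> 1536. */
    printf("mtu 1499 -> %u\n", ((1499u + 36 + 7) & ~7u));
    return 0;
}
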
2420 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_open() local
2424 wrlp(mp, INT_CAUSE, 0); in mv643xx_eth_open()
2425 wrlp(mp, INT_CAUSE_EXT, 0); in mv643xx_eth_open()
2426 rdlp(mp, INT_CAUSE_EXT); in mv643xx_eth_open()
2435 mv643xx_eth_recalc_skb_size(mp); in mv643xx_eth_open()
2437 napi_enable(&mp->napi); in mv643xx_eth_open()
2439 mp->int_mask = INT_EXT; in mv643xx_eth_open()
2441 for (i = 0; i < mp->rxq_count; i++) { in mv643xx_eth_open()
2442 err = rxq_init(mp, i); in mv643xx_eth_open()
2445 rxq_deinit(mp->rxq + i); in mv643xx_eth_open()
2449 rxq_refill(mp->rxq + i, INT_MAX); in mv643xx_eth_open()
2450 mp->int_mask |= INT_RX_0 << i; in mv643xx_eth_open()
2453 if (mp->oom) { in mv643xx_eth_open()
2454 mp->rx_oom.expires = jiffies + (HZ / 10); in mv643xx_eth_open()
2455 add_timer(&mp->rx_oom); in mv643xx_eth_open()
2458 for (i = 0; i < mp->txq_count; i++) { in mv643xx_eth_open()
2459 err = txq_init(mp, i); in mv643xx_eth_open()
2462 txq_deinit(mp->txq + i); in mv643xx_eth_open()
2465 mp->int_mask |= INT_TX_END_0 << i; in mv643xx_eth_open()
2468 add_timer(&mp->mib_counters_timer); in mv643xx_eth_open()
2469 port_start(mp); in mv643xx_eth_open()
2471 wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); in mv643xx_eth_open()
2472 wrlp(mp, INT_MASK, mp->int_mask); in mv643xx_eth_open()
2478 for (i = 0; i < mp->rxq_count; i++) in mv643xx_eth_open()
2479 rxq_deinit(mp->rxq + i); in mv643xx_eth_open()
2481 napi_disable(&mp->napi); in mv643xx_eth_open()
2487 static void port_reset(struct mv643xx_eth_private *mp) in port_reset() argument
2492 for (i = 0; i < mp->rxq_count; i++) in port_reset()
2493 rxq_disable(mp->rxq + i); in port_reset()
2494 for (i = 0; i < mp->txq_count; i++) in port_reset()
2495 txq_disable(mp->txq + i); in port_reset()
2498 u32 ps = rdlp(mp, PORT_STATUS); in port_reset()
2506 data = rdlp(mp, PORT_SERIAL_CONTROL); in port_reset()
2510 wrlp(mp, PORT_SERIAL_CONTROL, data); in port_reset()
2515 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_stop() local
2518 wrlp(mp, INT_MASK_EXT, 0x00000000); in mv643xx_eth_stop()
2519 wrlp(mp, INT_MASK, 0x00000000); in mv643xx_eth_stop()
2520 rdlp(mp, INT_MASK); in mv643xx_eth_stop()
2522 napi_disable(&mp->napi); in mv643xx_eth_stop()
2524 del_timer_sync(&mp->rx_oom); in mv643xx_eth_stop()
2531 port_reset(mp); in mv643xx_eth_stop()
2533 mib_counters_update(mp); in mv643xx_eth_stop()
2534 del_timer_sync(&mp->mib_counters_timer); in mv643xx_eth_stop()
2536 for (i = 0; i < mp->rxq_count; i++) in mv643xx_eth_stop()
2537 rxq_deinit(mp->rxq + i); in mv643xx_eth_stop()
2538 for (i = 0; i < mp->txq_count; i++) in mv643xx_eth_stop()
2539 txq_deinit(mp->txq + i); in mv643xx_eth_stop()
2559 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_change_mtu() local
2562 mv643xx_eth_recalc_skb_size(mp); in mv643xx_eth_change_mtu()
2563 tx_set_rate(mp, 1000000000, 16777216); in mv643xx_eth_change_mtu()
2585 struct mv643xx_eth_private *mp; in tx_timeout_task() local
2587 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); in tx_timeout_task()
2588 if (netif_running(mp->dev)) { in tx_timeout_task()
2589 netif_tx_stop_all_queues(mp->dev); in tx_timeout_task()
2590 port_reset(mp); in tx_timeout_task()
2591 port_start(mp); in tx_timeout_task()
2592 netif_tx_wake_all_queues(mp->dev); in tx_timeout_task()
2598 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_tx_timeout() local
2602 schedule_work(&mp->tx_timeout_task); in mv643xx_eth_tx_timeout()
2608 struct mv643xx_eth_private *mp = netdev_priv(dev); in mv643xx_eth_netpoll() local
2610 wrlp(mp, INT_MASK, 0x00000000); in mv643xx_eth_netpoll()
2611 rdlp(mp, INT_MASK); in mv643xx_eth_netpoll()
2615 wrlp(mp, INT_MASK, mp->int_mask); in mv643xx_eth_netpoll()
2894 static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr) in phy_addr_set() argument
2896 int addr_shift = 5 * mp->port_num; in phy_addr_set()
2899 data = rdl(mp, PHY_ADDR); in phy_addr_set()
2902 wrl(mp, PHY_ADDR, data); in phy_addr_set()
2905 static int phy_addr_get(struct mv643xx_eth_private *mp) in phy_addr_get() argument
2909 data = rdl(mp, PHY_ADDR); in phy_addr_get()
2911 return (data >> (5 * mp->port_num)) & 0x1f; in phy_addr_get()
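
PHY_ADDR is a shared register packing one 5-bit PHY address per port at bit position 5 * port_num; phy_addr_set() does a read-modify-write of its port's field and phy_addr_get() extracts it with a shift and a 0x1f mask. A sketch of that bitfield handling against a simulated register:

#include <stdint.h>
#include <stdio.h>

static uint32_t phy_addr_reg;   /* simulated shared PHY_ADDR register */

/* Each port owns a 5-bit field at bit 5*port, as in phy_addr_get(). */
static int phy_addr_get(int port)
{
    return (phy_addr_reg >> (5 * port)) & 0x1f;
}

/* Read-modify-write the port's field, as in phy_addr_set(). */
static void phy_addr_set(int port, int phy_addr)
{
    int shift = 5 * port;

    phy_addr_reg &= ~(0x1fu << shift);              /* clear the old field */
    phy_addr_reg |= (uint32_t)(phy_addr & 0x1f) << shift;  /* install the new address */
}

int main(void)
{
    phy_addr_set(0, 8);
    phy_addr_set(1, 9);
    printf("port0=%d port1=%d reg=%08x\n",
           phy_addr_get(0), phy_addr_get(1), phy_addr_reg);
    return 0;
}
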
2914 static void set_params(struct mv643xx_eth_private *mp, in set_params() argument
2917 struct net_device *dev = mp->dev; in set_params()
2925 uc_addr_get(mp, addr); in set_params()
2929 mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE; in set_params()
2931 mp->rx_ring_size = pd->rx_queue_size; in set_params()
2932 mp->rx_desc_sram_addr = pd->rx_sram_addr; in set_params()
2933 mp->rx_desc_sram_size = pd->rx_sram_size; in set_params()
2935 mp->rxq_count = pd->rx_queue_count ? : 1; in set_params()
2941 mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size, in set_params()
2943 if (mp->tx_ring_size != tx_ring_size) in set_params()
2945 mp->tx_ring_size, tx_ring_size); in set_params()
2947 mp->tx_desc_sram_addr = pd->tx_sram_addr; in set_params()
2948 mp->tx_desc_sram_size = pd->tx_sram_size; in set_params()
2950 mp->txq_count = pd->tx_queue_count ? : 1; in set_params()
2953 static int get_phy_mode(struct mv643xx_eth_private *mp) in get_phy_mode() argument
2955 struct device *dev = mp->dev->dev.parent; in get_phy_mode()
2970 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp, in phy_scan() argument
2980 start = phy_addr_get(mp) & 0x1f; in phy_scan()
2995 phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link, in phy_scan()
2996 get_phy_mode(mp)); in phy_scan()
2998 phy_addr_set(mp, addr); in phy_scan()
3006 static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) in phy_init() argument
3008 struct net_device *dev = mp->dev; in phy_init()
3027 static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) in init_pscr() argument
3029 struct net_device *dev = mp->dev; in init_pscr()
3032 pscr = rdlp(mp, PORT_SERIAL_CONTROL); in init_pscr()
3035 wrlp(mp, PORT_SERIAL_CONTROL, pscr); in init_pscr()
3053 wrlp(mp, PORT_SERIAL_CONTROL, pscr); in init_pscr()
3076 struct mv643xx_eth_private *mp; in mv643xx_eth_probe() local
3098 mp = netdev_priv(dev); in mv643xx_eth_probe()
3099 platform_set_drvdata(pdev, mp); in mv643xx_eth_probe()
3101 mp->shared = platform_get_drvdata(pd->shared); in mv643xx_eth_probe()
3102 mp->base = mp->shared->base + 0x0400 + (pd->port_number << 10); in mv643xx_eth_probe()
3103 mp->port_num = pd->port_number; in mv643xx_eth_probe()
3105 mp->dev = dev; in mv643xx_eth_probe()
3109 psc1r = rdlp(mp, PORT_SERIAL_CONTROL1); in mv643xx_eth_probe()
3144 wrlp(mp, PORT_SERIAL_CONTROL1, psc1r); in mv643xx_eth_probe()
3151 mp->t_clk = 133000000; in mv643xx_eth_probe()
3152 mp->clk = devm_clk_get(&pdev->dev, NULL); in mv643xx_eth_probe()
3153 if (!IS_ERR(mp->clk)) { in mv643xx_eth_probe()
3154 clk_prepare_enable(mp->clk); in mv643xx_eth_probe()
3155 mp->t_clk = clk_get_rate(mp->clk); in mv643xx_eth_probe()
3156 } else if (!IS_ERR(mp->shared->clk)) { in mv643xx_eth_probe()
3157 mp->t_clk = clk_get_rate(mp->shared->clk); in mv643xx_eth_probe()
3160 set_params(mp, pd); in mv643xx_eth_probe()
3161 netif_set_real_num_tx_queues(dev, mp->txq_count); in mv643xx_eth_probe()
3162 netif_set_real_num_rx_queues(dev, mp->rxq_count); in mv643xx_eth_probe()
3166 phydev = of_phy_connect(mp->dev, pd->phy_node, in mv643xx_eth_probe()
3168 get_phy_mode(mp)); in mv643xx_eth_probe()
3172 phy_addr_set(mp, phydev->mdio.addr); in mv643xx_eth_probe()
3174 phydev = phy_scan(mp, pd->phy_addr); in mv643xx_eth_probe()
3179 phy_init(mp, pd->speed, pd->duplex); in mv643xx_eth_probe()
3190 init_pscr(mp, pd->speed, pd->duplex); in mv643xx_eth_probe()
3193 mib_counters_clear(mp); in mv643xx_eth_probe()
3195 timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0); in mv643xx_eth_probe()
3196 mp->mib_counters_timer.expires = jiffies + 30 * HZ; in mv643xx_eth_probe()
3198 spin_lock_init(&mp->mib_counters_lock); in mv643xx_eth_probe()
3200 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); in mv643xx_eth_probe()
3202 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll); in mv643xx_eth_probe()
3204 timer_setup(&mp->rx_oom, oom_timer_wrapper, 0); in mv643xx_eth_probe()
3232 if (mp->shared->win_protect) in mv643xx_eth_probe()
3233 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect); in mv643xx_eth_probe()
3237 wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); in mv643xx_eth_probe()
3239 set_rx_coal(mp, 250); in mv643xx_eth_probe()
3240 set_tx_coal(mp, 0); in mv643xx_eth_probe()
3247 mp->port_num, dev->dev_addr); in mv643xx_eth_probe()
3249 if (mp->tx_desc_sram_size > 0) in mv643xx_eth_probe()
3255 if (!IS_ERR(mp->clk)) in mv643xx_eth_probe()
3256 clk_disable_unprepare(mp->clk); in mv643xx_eth_probe()
3264 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); in mv643xx_eth_remove() local
3265 struct net_device *dev = mp->dev; in mv643xx_eth_remove()
3267 unregister_netdev(mp->dev); in mv643xx_eth_remove()
3270 cancel_work_sync(&mp->tx_timeout_task); in mv643xx_eth_remove()
3272 if (!IS_ERR(mp->clk)) in mv643xx_eth_remove()
3273 clk_disable_unprepare(mp->clk); in mv643xx_eth_remove()
3275 free_netdev(mp->dev); in mv643xx_eth_remove()
3280 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); in mv643xx_eth_shutdown() local
3283 wrlp(mp, INT_MASK, 0); in mv643xx_eth_shutdown()
3284 rdlp(mp, INT_MASK); in mv643xx_eth_shutdown()
3286 if (netif_running(mp->dev)) in mv643xx_eth_shutdown()
3287 port_reset(mp); in mv643xx_eth_shutdown()