// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t rx_dropped;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	atomic_t rx_queued;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
};

struct mhi_net_dev {
	struct mhi_device *mdev;
	struct net_device *ndev;
	struct delayed_work rx_refill;
	struct mhi_net_stats stats;
	u32 rx_queue_sz;
};

static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}

static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);

		u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
		u64_stats_inc(&mhi_netdev->stats.tx_dropped);
		u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

		/* drop the packet */
		dev_kfree_skb_any(skb);
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}

static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
		stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open = mhi_ndo_open,
	.ndo_stop = mhi_ndo_stop,
	.ndo_start_xmit = mhi_ndo_xmit,
	.ndo_get_stats64 = mhi_ndo_get_stats64,
};
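
/* Descriptive note (added): the link carries raw IP or multiplexed QMAP
 * frames with no link-layer header (received buffers are tagged ETH_P_MAP
 * in the DL callback), which is why the interface is set up below as
 * ARPHRD_NONE, point-to-point and NOARP with zero hard_header_len/addr_len.
 */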
static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_NONE; /* QMAP... */
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}

static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int remaining;

	remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);

	if (unlikely(mhi_res->transaction_status)) {
		dev_kfree_skb_any(skb);

		/* MHI layer stopping/resetting the DL channel */
		if (mhi_res->transaction_status == -ENOTCONN)
			return;

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_errors);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
	} else {
		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

		skb->protocol = htons(ETH_P_MAP);
		skb_put(skb, mhi_res->bytes_xferd);
		netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (remaining <= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {

		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}
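
/* Descriptive note (added): this worker keeps the downlink (RX) channel
 * supplied with MTU-sized skbs, up to rx_queue_sz buffers in flight. It is
 * kicked from ndo_open, re-armed by the DL callback once the number of
 * queued buffers drops below half of the queue size, and retries after
 * HZ/2 if it ends up completely starved (allocation or queuing failure).
 */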
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	int size = READ_ONCE(ndev->mtu);
	struct sk_buff *skb;
	int err;

	while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		atomic_inc(&mhi_netdev->stats.rx_queued);

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}

static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const char *netname = (char *)id->driver_data;
	struct device *dev = &mhi_dev->dev;
	struct mhi_net_dev *mhi_netdev;
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
			    mhi_net_setup);
	if (!ndev)
		return -ENOMEM;

	mhi_netdev = netdev_priv(ndev);
	dev_set_drvdata(dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	/* All MHI net channels have 128 ring elements (at least for now) */
	mhi_netdev->rx_queue_sz = 128;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		goto out_err;

	err = register_netdev(ndev);
	if (err)
		goto out_err;

	return 0;

out_err:
	free_netdev(ndev);
	return err;
}

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	unregister_netdev(mhi_netdev->ndev);

	mhi_unprepare_from_transfer(mhi_netdev->mdev);

	free_netdev(mhi_netdev->ndev);
}
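
/* Descriptive note (added): "IP_HW0" is typically the hardware-offloaded
 * data path and "IP_SW0" the software one, as defined by the MHI controller's
 * channel configuration; the driver_data string is the interface name
 * template handed to alloc_netdev() in probe.
 */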
static const struct mhi_device_id mhi_net_id_table[] = {
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
		.owner = THIS_MODULE,
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");