// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
#include <net/ipv6.h>
#include <net/pkt_sched.h>

#include "t7xx_hif_dpmaif_rx.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_netdev.h"
#include "t7xx_pci.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"

#define IP_MUX_SESSION_DEFAULT	0
#define SBD_PACKET_TYPE_MASK	GENMASK(7, 4)
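
/*
 * CCMNI glues the WWAN core net devices to the DPMAIF hardware interface:
 * TX skbs are tagged with their interface index and handed to
 * t7xx_dpmaif_tx_send_skb(), while RX skbs come back through the recv_skb
 * callback and are fed to GRO from NAPI context.
 */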

static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	struct dpmaif_ctrl *ctrl;
	int i, ret;

	ctrl = ctlb->hif_ctrl;

	if (ctlb->is_napi_en)
		return;

	for (i = 0; i < RXQ_NUM; i++) {
		/* The usage count has to be bumped every time before calling
		 * napi_schedule. It will be decreased in the poll routine,
		 * right after napi_complete_done is called.
		 */
		ret = pm_runtime_resume_and_get(ctrl->dev);
		if (ret < 0) {
			dev_err(ctrl->dev, "Failed to resume device: %d\n",
				ret);
			return;
		}
		napi_enable(ctlb->napi[i]);
		napi_schedule(ctlb->napi[i]);
	}
	ctlb->is_napi_en = true;
}

static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	if (!ctlb->is_napi_en)
		return;

	for (i = 0; i < RXQ_NUM; i++) {
		napi_synchronize(ctlb->napi[i]);
		napi_disable(ctlb->napi[i]);
	}

	ctlb->is_napi_en = false;
}

static int t7xx_ccmni_open(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt))
		t7xx_ccmni_enable_napi(ccmni_ctl);

	atomic_inc(&ccmni->usage);
	return 0;
}

static int t7xx_ccmni_close(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;

	atomic_dec(&ccmni->usage);
	if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt))
		t7xx_ccmni_disable_napi(ccmni_ctl);

	netif_carrier_off(dev);
	netif_tx_disable(dev);
	return 0;
}
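
/*
 * A short walk-through of the refcounting above, since two counters are in
 * play: napi_usr_refcnt counts open net devices sharing the RX queues.
 * atomic_fetch_inc() returns the old value, so only the 0 -> 1 transition in
 * t7xx_ccmni_open() enables NAPI, and atomic_dec_and_test() in
 * t7xx_ccmni_close() fires only on the 1 -> 0 transition, keeping the shared
 * queues polling while any interface is still up. The per-device usage
 * counter only gates carrier and TX queue handling elsewhere in this file.
 */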

static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
				  unsigned int txq_number)
{
	struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
	struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);

	skb_cb->netif_idx = ccmni->index;

	if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
		return NETDEV_TX_BUSY;

	return 0;
}

static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	int skb_len = skb->len;

	/* If the MTU was changed or there is no headroom, drop the packet */
	if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
		return NETDEV_TX_BUSY;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb_len;

	return NETDEV_TX_OK;
}

static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);

	dev->stats.tx_errors++;

	if (atomic_read(&ccmni->usage) > 0)
		netif_tx_wake_all_queues(dev);
}

static const struct net_device_ops ccmni_netdev_ops = {
	.ndo_open = t7xx_ccmni_open,
	.ndo_stop = t7xx_ccmni_close,
	.ndo_start_xmit = t7xx_ccmni_start_xmit,
	.ndo_tx_timeout = t7xx_ccmni_tx_timeout,
};

static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0) {
			netif_tx_start_all_queues(ccmni->dev);
			netif_carrier_on(ccmni->dev);
		}
	}

	if (atomic_read(&ctlb->napi_usr_refcnt))
		t7xx_ccmni_enable_napi(ctlb);
}

static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_tx_disable(ccmni->dev);
	}
}

static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	if (atomic_read(&ctlb->napi_usr_refcnt))
		t7xx_ccmni_disable_napi(ctlb);

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_carrier_off(ccmni->dev);
	}
}

static void t7xx_ccmni_wwan_setup(struct net_device *dev)
{
	dev->needed_headroom += sizeof(struct ccci_header);

	dev->mtu = ETH_DATA_LEN;
	dev->max_mtu = CCMNI_MTU_MAX;
	BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);

	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;

	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	dev->features = NETIF_F_VLAN_CHALLENGED;

	dev->features |= NETIF_F_SG;
	dev->hw_features |= NETIF_F_SG;

	dev->features |= NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_HW_CSUM;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXCSUM;

	dev->features |= NETIF_F_GRO;
	dev->hw_features |= NETIF_F_GRO;

	dev->needs_free_netdev = true;

	dev->type = ARPHRD_NONE;

	dev->netdev_ops = &ccmni_netdev_ops;
}
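
/*
 * Headroom contract, summarized (an inference from the setup and xmit code
 * above, not a statement of the DPMAIF internals): needed_headroom reserves
 * sizeof(struct ccci_header) bytes on every skb, and t7xx_ccmni_start_xmit()
 * drops packets that arrive without that headroom, so the TX path below the
 * driver can prepend the CCCI header without reallocating the skb.
 */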

static int t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	/* The HW is shared by multiple net devices,
	 * so add a dummy device for NAPI.
	 */
	ctlb->dummy_dev = alloc_netdev_dummy(0);
	if (!ctlb->dummy_dev)
		return -ENOMEM;

	atomic_set(&ctlb->napi_usr_refcnt, 0);
	ctlb->is_napi_en = false;

	for (i = 0; i < RXQ_NUM; i++) {
		ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
		netif_napi_add_weight(ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
				      NIC_NAPI_POLL_BUDGET);
	}

	return 0;
}

static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	for (i = 0; i < RXQ_NUM; i++) {
		netif_napi_del(ctlb->napi[i]);
		ctlb->napi[i] = NULL;
	}
	free_netdev(ctlb->dummy_dev);
}

static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
				   struct netlink_ext_ack *extack)
{
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	struct t7xx_ccmni *ccmni;
	int ret;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return -EINVAL;

	ccmni = wwan_netdev_drvpriv(dev);
	ccmni->index = if_id;
	ccmni->ctlb = ctlb;
	ccmni->dev = dev;
	atomic_set(&ccmni->usage, 0);
	ctlb->ccmni_inst[if_id] = ccmni;

	ret = register_netdevice(dev);
	if (ret)
		return ret;

	netif_device_attach(dev);
	return 0;
}

static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	u8 if_id = ccmni->index;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return;

	if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
		return;

	unregister_netdevice(dev);
}

static const struct wwan_ops ccmni_wwan_ops = {
	.priv_size = sizeof(struct t7xx_ccmni),
	.setup = t7xx_ccmni_wwan_setup,
	.newlink = t7xx_ccmni_wwan_newlink,
	.dellink = t7xx_ccmni_wwan_dellink,
};

static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
{
	struct device *dev = ctlb->hif_ctrl->dev;
	int ret;

	if (ctlb->wwan_is_registered)
		return 0;

	/* WWAN core will create a netdev for the default IP MUX channel */
	ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
	if (ret < 0) {
		dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
		return ret;
	}

	ctlb->wwan_is_registered = true;
	return 0;
}

static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
{
	struct t7xx_ccmni_ctrl *ctlb = para;
	struct device *dev;
	int ret = 0;

	dev = ctlb->hif_ctrl->dev;
	ctlb->md_sta = state;

	switch (state) {
	case MD_STATE_READY:
		ret = t7xx_ccmni_register_wwan(ctlb);
		if (!ret)
			t7xx_ccmni_start(ctlb);
		break;

	case MD_STATE_EXCEPTION:
	case MD_STATE_STOPPED:
		t7xx_ccmni_pre_stop(ctlb);

		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		t7xx_ccmni_post_stop(ctlb);
		break;

	case MD_STATE_WAITING_FOR_HS1:
	case MD_STATE_WAITING_TO_STOP:
		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		break;

	default:
		break;
	}

	return ret;
}

static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
	struct t7xx_fsm_notifier *md_status_notifier;

	md_status_notifier = &ctlb->md_status_notify;
	INIT_LIST_HEAD(&md_status_notifier->entry);
	md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
	md_status_notifier->data = ctlb;

	t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
}
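
/*
 * Modem state transitions handled by t7xx_ccmni_md_state_callback() above:
 *
 *	MD_STATE_READY			register the WWAN ops (first time
 *					only), then restore carrier and TX
 *					queues on open net devices
 *	MD_STATE_EXCEPTION,
 *	MD_STATE_STOPPED		quiesce TX first, notify DPMAIF, then
 *					drop carrier and disable NAPI
 *	MD_STATE_WAITING_FOR_HS1,
 *	MD_STATE_WAITING_TO_STOP	forwarded to DPMAIF only
 */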

static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
				struct napi_struct *napi)
{
	struct t7xx_skb_cb *skb_cb;
	struct net_device *net_dev;
	struct t7xx_ccmni *ccmni;
	int pkt_type, skb_len;
	u8 netif_id;

	skb_cb = T7XX_SKB_CB(skb);
	netif_id = skb_cb->netif_idx;
	ccmni = ccmni_ctlb->ccmni_inst[netif_id];
	if (!ccmni) {
		dev_kfree_skb(skb);
		return;
	}

	net_dev = ccmni->dev;
	pkt_type = skb_cb->rx_pkt_type;
	skb->dev = net_dev;
	if (pkt_type == PKT_TYPE_IP6)
		skb->protocol = htons(ETH_P_IPV6);
	else
		skb->protocol = htons(ETH_P_IP);

	skb_len = skb->len;
	napi_gro_receive(napi, skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += skb_len;
}

static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
	struct netdev_queue *net_queue;

	if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		if (netif_tx_queue_stopped(net_queue))
			netif_tx_wake_queue(net_queue);
	}
}

static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
	struct netdev_queue *net_queue;

	if (atomic_read(&ccmni->usage) > 0) {
		netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		netif_tx_stop_queue(net_queue);
	}
}

static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
					  enum dpmaif_txq_state state, int qno)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	if (ctlb->md_sta != MD_STATE_READY)
		return;

	if (!ctlb->ccmni_inst[0]) {
		dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
		return;
	}

	if (state == DMPAIF_TXQ_STATE_IRQ)
		t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
	else if (state == DMPAIF_TXQ_STATE_FULL)
		t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
}

int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_ccmni_ctrl *ctlb;
	int ret;

	ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
	if (!ctlb)
		return -ENOMEM;

	t7xx_dev->ccmni_ctlb = ctlb;
	ctlb->t7xx_dev = t7xx_dev;
	ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
	ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
	ctlb->nic_dev_num = NIC_DEV_DEFAULT;

	ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
	if (!ctlb->hif_ctrl)
		return -ENOMEM;

	ret = t7xx_init_netdev_napi(ctlb);
	if (ret) {
		t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
		return ret;
	}

	init_md_status_notifier(t7xx_dev);
	return 0;
}

void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);

	if (ctlb->wwan_is_registered) {
		wwan_unregister_ops(&t7xx_dev->pdev->dev);
		ctlb->wwan_is_registered = false;
	}

	t7xx_uninit_netdev_napi(ctlb);
	t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
}
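
/*
 * Typical lifetime, as a sketch (the actual callers live in the PCI probe
 * and remove paths outside this file):
 *
 *	t7xx_ccmni_init()	allocate the control block, bring up the
 *				DPMAIF HIF, attach NAPI to a dummy netdev,
 *				register the modem state notifier
 *	MD_STATE_READY		WWAN ops get registered and the WWAN core
 *				creates the default IP MUX channel netdev
 *	t7xx_ccmni_exit()	unregister the notifier and WWAN ops, then
 *				tear down NAPI and the DPMAIF HIF
 */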