// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2024 Linaro Ltd.
 */

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_rmnet.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

#include <linux/remoteproc/qcom_rproc.h>

#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_smp2p.h"
#include "ipa_table.h"
#include "ipa_uc.h"

#define IPA_NETDEV_NAME		"rmnet_ipa%d"
#define IPA_NETDEV_TAILROOM	0	/* for padding by mux layer */
#define IPA_NETDEV_TIMEOUT	10	/* seconds */

enum ipa_modem_state {
	IPA_MODEM_STATE_STOPPED	= 0,
	IPA_MODEM_STATE_STARTING,
	IPA_MODEM_STATE_RUNNING,
	IPA_MODEM_STATE_STOPPING,
};

/**
 * struct ipa_priv - IPA network device private data
 * @ipa:	IPA pointer
 * @tx:		Transmit endpoint pointer
 * @rx:		Receive endpoint pointer
 * @work:	Work structure used to wake the modem netdev TX queue
 */
struct ipa_priv {
	struct ipa *ipa;
	struct ipa_endpoint *tx;
	struct ipa_endpoint *rx;
	struct work_struct work;
};

/** ipa_open() - Opens the modem network interface */
static int ipa_open(struct net_device *netdev)
{
	struct ipa_priv *priv = netdev_priv(netdev);
	struct ipa *ipa = priv->ipa;
	struct device *dev;
	int ret;

	dev = ipa->dev;
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_power_put;

	ret = ipa_endpoint_enable_one(priv->tx);
	if (ret)
		goto err_power_put;

	ret = ipa_endpoint_enable_one(priv->rx);
	if (ret)
		goto err_disable_tx;

	netif_start_queue(netdev);

	(void)pm_runtime_put_autosuspend(dev);

	return 0;

err_disable_tx:
	ipa_endpoint_disable_one(priv->tx);
err_power_put:
	pm_runtime_put_noidle(dev);

	return ret;
}

/** ipa_stop() - Stops the modem network interface. */
static int ipa_stop(struct net_device *netdev)
{
	struct ipa_priv *priv = netdev_priv(netdev);
	struct ipa *ipa = priv->ipa;
	struct device *dev;
	int ret;

	dev = ipa->dev;
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto out_power_put;

	netif_stop_queue(netdev);

	ipa_endpoint_disable_one(priv->rx);
	ipa_endpoint_disable_one(priv->tx);
out_power_put:
	(void)pm_runtime_put_autosuspend(dev);

	return 0;
}

/** ipa_start_xmit() - Transmit an skb
 * @skb:	Socket buffer to be transmitted
 * @netdev:	Network device
 *
 * Return: NETDEV_TX_OK if successful (or dropped), NETDEV_TX_BUSY otherwise
 *
 * Normally NETDEV_TX_OK indicates the buffer was successfully transmitted.
 * If the buffer has an unexpected protocol or its size is out of range it
 * is quietly dropped, returning NETDEV_TX_OK.  NETDEV_TX_BUSY indicates
 * the buffer cannot be sent at this time and should be retried later.
 */
static netdev_tx_t
ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct net_device_stats *stats = &netdev->stats;
	struct ipa_priv *priv = netdev_priv(netdev);
	struct ipa_endpoint *endpoint;
	struct ipa *ipa = priv->ipa;
	u32 skb_len = skb->len;
	struct device *dev;
	int ret;

	if (!skb_len)
		goto err_drop_skb;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
	if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
		goto err_drop_skb;

	/* The hardware must be powered for us to transmit, so if we're not
	 * ready we want the network stack to stop queueing until power is
	 * ACTIVE.  Once runtime resume has completed, we inform the network
	 * stack it's OK to try transmitting again.
	 *
	 * We learn from pm_runtime_get() whether the hardware is powered.
	 * If it was not, powering up is either started or already underway.
	 * And in that case we want to disable queueing, expecting it to be
	 * re-enabled once power is ACTIVE.  But runtime PM and network
	 * transmit run concurrently, and if we're not careful the requests
	 * to stop and start queueing could occur in the wrong order.
	 *
	 * For that reason we *always* stop queueing here, *before* the call
	 * to pm_runtime_get().  If we determine here that power is ACTIVE,
	 * we restart queueing before transmitting the SKB.  Otherwise
	 * queueing will eventually be enabled after resume completes.
	 */
	netif_stop_queue(netdev);

	dev = ipa->dev;
	ret = pm_runtime_get(dev);
	if (ret < 1) {
		/* If a resume won't happen, just drop the packet */
		if (ret < 0 && ret != -EINPROGRESS) {
			netif_wake_queue(netdev);
			pm_runtime_put_noidle(dev);
			goto err_drop_skb;
		}

		pm_runtime_put_noidle(dev);

		return NETDEV_TX_BUSY;
	}

	netif_wake_queue(netdev);

	ret = ipa_endpoint_skb_tx(endpoint, skb);

	(void)pm_runtime_put_autosuspend(dev);

	if (ret) {
		if (ret != -E2BIG)
			return NETDEV_TX_BUSY;
		goto err_drop_skb;
	}

	stats->tx_packets++;
	stats->tx_bytes += skb_len;

	return NETDEV_TX_OK;

err_drop_skb:
	dev_kfree_skb_any(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}

void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb)
{
	struct net_device_stats *stats = &netdev->stats;

	if (skb) {
		skb->dev = netdev;
		skb->protocol = htons(ETH_P_MAP);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;

		(void)netif_receive_skb(skb);
	} else {
		stats->rx_dropped++;
	}
}

static const struct net_device_ops ipa_modem_ops = {
	.ndo_open	= ipa_open,
	.ndo_stop	= ipa_stop,
	.ndo_start_xmit	= ipa_start_xmit,
};

/** ipa_modem_netdev_setup() - netdev setup function for the modem */
static void ipa_modem_netdev_setup(struct net_device *netdev)
{
	netdev->netdev_ops = &ipa_modem_ops;

	netdev->header_ops = NULL;
	netdev->type = ARPHRD_RAWIP;
	netdev->hard_header_len = 0;
	netdev->min_header_len = ETH_HLEN;
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IPA_MTU;
	netdev->mtu = netdev->max_mtu;
	netdev->addr_len = 0;
	netdev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
	netdev->priv_flags |= IFF_TX_SKB_SHARING;
	eth_broadcast_addr(netdev->broadcast);

	/* The endpoint is configured for QMAP */
	netdev->needed_headroom = sizeof(struct rmnet_map_header);
	netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
	netdev->watchdog_timeo = IPA_NETDEV_TIMEOUT * HZ;
	netdev->hw_features = NETIF_F_SG;
}

/** ipa_modem_suspend() - suspend callback
 * @netdev:	Network device
 *
 * Suspend the modem's endpoints.
 */
void ipa_modem_suspend(struct net_device *netdev)
{
	struct ipa_priv *priv;

	if (!(netdev->flags & IFF_UP))
		return;

	priv = netdev_priv(netdev);
	ipa_endpoint_suspend_one(priv->rx);
	ipa_endpoint_suspend_one(priv->tx);
}

/**
 * ipa_modem_wake_queue_work() - enable modem netdev queue
 * @work:	Work structure
 *
 * Re-enable transmit on the modem network device.  This is called
 * in (power management) work queue context, scheduled when resuming
 * the modem.  We can't enable the queue directly in ipa_modem_resume()
 * because transmits restart the instant the queue is awakened; but the
 * device power state won't be ACTIVE until *after* ipa_modem_resume()
 * returns.
 */
static void ipa_modem_wake_queue_work(struct work_struct *work)
{
	struct ipa_priv *priv = container_of(work, struct ipa_priv, work);

	netif_wake_queue(priv->tx->netdev);
}

/** ipa_modem_resume() - resume callback for runtime_pm
 * @netdev:	Network device
 *
 * Resume the modem's endpoints.
 */
void ipa_modem_resume(struct net_device *netdev)
{
	struct ipa_priv *priv;

	if (!(netdev->flags & IFF_UP))
		return;

	priv = netdev_priv(netdev);
	ipa_endpoint_resume_one(priv->tx);
	ipa_endpoint_resume_one(priv->rx);

	/* Arrange for the TX queue to be restarted */
	(void)queue_pm_work(&priv->work);
}

/* Allocate, initialize, and register the modem network device */
int ipa_modem_start(struct ipa *ipa)
{
	enum ipa_modem_state state;
	struct net_device *netdev;
	struct ipa_priv *priv;
	int ret;

	/* Only attempt to start the modem if it's stopped */
	state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_STOPPED,
			       IPA_MODEM_STATE_STARTING);

	/* Silently ignore attempts when running, or when changing state */
	if (state != IPA_MODEM_STATE_STOPPED)
		return 0;

	netdev = alloc_netdev(sizeof(struct ipa_priv), IPA_NETDEV_NAME,
			      NET_NAME_UNKNOWN, ipa_modem_netdev_setup);
	if (!netdev) {
		ret = -ENOMEM;
		goto out_set_state;
	}

	SET_NETDEV_DEV(netdev, ipa->dev);
	priv = netdev_priv(netdev);
	priv->ipa = ipa;
	priv->tx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
	priv->rx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX];
	INIT_WORK(&priv->work, ipa_modem_wake_queue_work);

	priv->tx->netdev = netdev;
	priv->rx->netdev = netdev;

	ipa->modem_netdev = netdev;

	ret = register_netdev(netdev);
	if (ret) {
		ipa->modem_netdev = NULL;
		priv->rx->netdev = NULL;
		priv->tx->netdev = NULL;

		free_netdev(netdev);
	}

out_set_state:
	if (ret)
		atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
	else
		atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
	smp_mb__after_atomic();

	return ret;
}

/* Unregister and free the modem network device, if it was started */
int ipa_modem_stop(struct ipa *ipa)
{
	struct net_device *netdev = ipa->modem_netdev;
	enum ipa_modem_state state;

	/* Only attempt to stop the modem if it's running */
	state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
			       IPA_MODEM_STATE_STOPPING);

	/* Silently ignore attempts when already stopped */
	if (state == IPA_MODEM_STATE_STOPPED)
		return 0;

	/* If we're somewhere between stopped and starting, we're busy */
	if (state != IPA_MODEM_STATE_RUNNING)
		return -EBUSY;

	/* Clean up the netdev and endpoints if it was started */
	if (netdev) {
		struct ipa_priv *priv = netdev_priv(netdev);

		cancel_work_sync(&priv->work);
		/* If it was opened, stop it first */
		if (netdev->flags & IFF_UP)
			(void)ipa_stop(netdev);
		unregister_netdev(netdev);

		ipa->modem_netdev = NULL;
		priv->rx->netdev = NULL;
		priv->tx->netdev = NULL;

		free_netdev(netdev);
	}

	atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
	smp_mb__after_atomic();

	return 0;
}

/* Treat a "clean" modem stop the same as a crash */
static void ipa_modem_crashed(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	int ret;

	/* Prevent the modem from triggering a call to ipa_setup() */
	ipa_smp2p_irq_disable_setup(ipa);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "error %d getting power to handle crash\n", ret);
		goto out_power_put;
	}

	ipa_endpoint_modem_pause_all(ipa, true);

	ipa_endpoint_modem_hol_block_clear_all(ipa);

	ipa_table_reset(ipa, true);

	ret = ipa_table_hash_flush(ipa);
	if (ret)
		dev_err(dev, "error %d flushing hash caches\n", ret);

	ret = ipa_endpoint_modem_exception_reset_all(ipa);
	if (ret)
		dev_err(dev, "error %d resetting exception endpoint\n", ret);

	ipa_endpoint_modem_pause_all(ipa, false);

	ret = ipa_modem_stop(ipa);
	if (ret)
		dev_err(dev, "error %d stopping modem\n", ret);

	/* Now prepare for the next modem boot */
	ret = ipa_mem_zero_modem(ipa);
	if (ret)
		dev_err(dev, "error %d zeroing modem memory regions\n", ret);

out_power_put:
	(void)pm_runtime_put_autosuspend(dev);
}

/* Notifier callback invoked when the modem subsystem (SSR) changes state */
static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	struct ipa *ipa = container_of(nb, struct ipa, nb);
	struct qcom_ssr_notify_data *notify_data = data;
	struct device *dev = ipa->dev;

	switch (action) {
	case QCOM_SSR_BEFORE_POWERUP:
		dev_info(dev, "received modem starting event\n");
		ipa_uc_power(ipa);
		ipa_smp2p_notify_reset(ipa);
		break;

	case QCOM_SSR_AFTER_POWERUP:
		dev_info(dev, "received modem running event\n");
		break;

	case QCOM_SSR_BEFORE_SHUTDOWN:
		dev_info(dev, "received modem %s event\n",
			 notify_data->crashed ? "crashed" : "stopping");
		if (ipa->setup_complete)
			ipa_modem_crashed(ipa);
		break;

	case QCOM_SSR_AFTER_SHUTDOWN:
		dev_info(dev, "received modem offline event\n");
		break;

	default:
		dev_err(dev, "received unrecognized event %lu\n", action);
		break;
	}

	return NOTIFY_OK;
}

/* Register for notification of modem subsystem (SSR) state changes */
int ipa_modem_config(struct ipa *ipa)
{
	void *notifier;

	ipa->nb.notifier_call = ipa_modem_notify;

	notifier = qcom_register_ssr_notifier("mpss", &ipa->nb);
	if (IS_ERR(notifier))
		return PTR_ERR(notifier);

	ipa->notifier = notifier;

	return 0;
}

/* Inverse of ipa_modem_config(); unregister the SSR notifier */
void ipa_modem_deconfig(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	int ret;

	ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
	if (ret)
		dev_err(dev, "error %d unregistering notifier\n", ret);

	ipa->notifier = NULL;
	memset(&ipa->nb, 0, sizeof(ipa->nb));
}