1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2021 Intel Corporation 4 */ 5 6 #include <linux/etherdevice.h> 7 #include <linux/netdevice.h> 8 #include <linux/ieee80211.h> 9 #include <linux/rtnetlink.h> 10 #include <linux/module.h> 11 #include <linux/moduleparam.h> 12 #include <linux/mei_cl_bus.h> 13 #include <linux/rcupdate.h> 14 #include <linux/debugfs.h> 15 #include <linux/skbuff.h> 16 #include <linux/wait.h> 17 #include <linux/slab.h> 18 #include <linux/mm.h> 19 20 #include <net/cfg80211.h> 21 22 #include "internal.h" 23 #include "iwl-mei.h" 24 #include "trace.h" 25 #include "trace-data.h" 26 #include "sap.h" 27 28 MODULE_DESCRIPTION("The Intel(R) wireless / CSME firmware interface"); 29 MODULE_LICENSE("GPL"); 30 31 #define MEI_WLAN_UUID UUID_LE(0x13280904, 0x7792, 0x4fcb, \ 32 0xa1, 0xaa, 0x5e, 0x70, 0xcb, 0xb1, 0xe8, 0x65) 33 34 /* 35 * Since iwlwifi calls iwlmei without any context, hold a pointer to the 36 * mei_cl_device structure here. 37 * Define a mutex that will synchronize all the flows between iwlwifi and 38 * iwlmei. 39 * Note that iwlmei can't have several instances, so it ok to have static 40 * variables here. 41 */ 42 static struct mei_cl_device *iwl_mei_global_cldev; 43 static DEFINE_MUTEX(iwl_mei_mutex); 44 static unsigned long iwl_mei_status; 45 46 enum iwl_mei_status_bits { 47 IWL_MEI_STATUS_SAP_CONNECTED, 48 }; 49 50 bool iwl_mei_is_connected(void) 51 { 52 return test_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status); 53 } 54 EXPORT_SYMBOL_GPL(iwl_mei_is_connected); 55 56 #define SAP_VERSION 3 57 #define SAP_CONTROL_BLOCK_ID 0x21504153 /* SAP! in ASCII */ 58 59 struct iwl_sap_q_ctrl_blk { 60 __le32 wr_ptr; 61 __le32 rd_ptr; 62 __le32 size; 63 }; 64 65 enum iwl_sap_q_idx { 66 SAP_QUEUE_IDX_NOTIF = 0, 67 SAP_QUEUE_IDX_DATA, 68 SAP_QUEUE_IDX_MAX, 69 }; 70 71 struct iwl_sap_dir { 72 __le32 reserved; 73 struct iwl_sap_q_ctrl_blk q_ctrl_blk[SAP_QUEUE_IDX_MAX]; 74 }; 75 76 enum iwl_sap_dir_idx { 77 SAP_DIRECTION_HOST_TO_ME = 0, 78 SAP_DIRECTION_ME_TO_HOST, 79 SAP_DIRECTION_MAX, 80 }; 81 82 struct iwl_sap_shared_mem_ctrl_blk { 83 __le32 sap_id; 84 __le32 size; 85 struct iwl_sap_dir dir[SAP_DIRECTION_MAX]; 86 }; 87 88 /* 89 * The shared area has the following layout: 90 * 91 * +-----------------------------------+ 92 * |struct iwl_sap_shared_mem_ctrl_blk | 93 * +-----------------------------------+ 94 * |Host -> ME data queue | 95 * +-----------------------------------+ 96 * |Host -> ME notif queue | 97 * +-----------------------------------+ 98 * |ME -> Host data queue | 99 * +-----------------------------------+ 100 * |ME -> host notif queue | 101 * +-----------------------------------+ 102 * |SAP control block id (SAP!) | 103 * +-----------------------------------+ 104 */ 105 106 #define SAP_H2M_DATA_Q_SZ 48256 107 #define SAP_M2H_DATA_Q_SZ 24128 108 #define SAP_H2M_NOTIF_Q_SZ 2240 109 #define SAP_M2H_NOTIF_Q_SZ 62720 110 111 #define _IWL_MEI_SAP_SHARED_MEM_SZ \ 112 (sizeof(struct iwl_sap_shared_mem_ctrl_blk) + \ 113 SAP_H2M_DATA_Q_SZ + SAP_H2M_NOTIF_Q_SZ + \ 114 SAP_M2H_DATA_Q_SZ + SAP_M2H_NOTIF_Q_SZ + 4) 115 116 #define IWL_MEI_SAP_SHARED_MEM_SZ \ 117 (roundup(_IWL_MEI_SAP_SHARED_MEM_SZ, PAGE_SIZE)) 118 119 struct iwl_mei_shared_mem_ptrs { 120 struct iwl_sap_shared_mem_ctrl_blk *ctrl; 121 void *q_head[SAP_DIRECTION_MAX][SAP_QUEUE_IDX_MAX]; 122 }; 123 124 struct iwl_mei_filters { 125 struct rcu_head rcu_head; 126 struct iwl_sap_oob_filters filters; 127 }; 128 129 /** 130 * struct iwl_mei - holds the private date for iwl_mei 131 * 132 * @get_nvm_wq: the wait queue for the get_nvm flow 133 * @send_csa_msg_wk: used to defer the transmission of the CHECK_SHARED_AREA 134 * message. Used so that we can send CHECK_SHARED_AREA from atomic 135 * contexts. 136 * @get_ownership_wq: the wait queue for the get_ownership_flow 137 * @shared_mem: the memory that is shared between CSME and the host 138 * @cldev: the pointer to the MEI client device 139 * @nvm: the data returned by the CSME for the NVM 140 * @filters: the filters sent by CSME 141 * @got_ownership: true if we own the device 142 * @amt_enabled: true if CSME has wireless enabled 143 * @csa_throttled: when true, we can't send CHECK_SHARED_AREA over the MEI 144 * bus, but rather need to wait until send_csa_msg_wk runs 145 * @csme_taking_ownership: true when CSME is taking ownership. Used to remember 146 * to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down 147 * flow. 148 * @csa_throttle_end_wk: used when &csa_throttled is true 149 * @data_q_lock: protects the access to the data queues which are 150 * accessed without the mutex. 151 * @sap_seq_no: the sequence number for the SAP messages 152 * @seq_no: the sequence number for the SAP messages 153 * @dbgfs_dir: the debugfs dir entry 154 */ 155 struct iwl_mei { 156 wait_queue_head_t get_nvm_wq; 157 struct work_struct send_csa_msg_wk; 158 wait_queue_head_t get_ownership_wq; 159 struct iwl_mei_shared_mem_ptrs shared_mem; 160 struct mei_cl_device *cldev; 161 struct iwl_mei_nvm *nvm; 162 struct iwl_mei_filters __rcu *filters; 163 bool got_ownership; 164 bool amt_enabled; 165 bool csa_throttled; 166 bool csme_taking_ownership; 167 struct delayed_work csa_throttle_end_wk; 168 spinlock_t data_q_lock; 169 170 atomic_t sap_seq_no; 171 atomic_t seq_no; 172 173 struct dentry *dbgfs_dir; 174 }; 175 176 /** 177 * iwl_mei_cache - cache for the parameters from iwlwifi 178 * @ops: Callbacks to iwlwifi. 179 * @netdev: The netdev that will be used to transmit / receive packets. 180 * @conn_info: The connection info message triggered by iwlwifi's association. 181 * @power_limit: pointer to an array of 10 elements (le16) represents the power 182 * restrictions per chain. 183 * @rf_kill: rf kill state. 184 * @mcc: MCC info 185 * @mac_address: interface MAC address. 186 * @nvm_address: NVM MAC address. 187 * @priv: A pointer to iwlwifi. 188 * 189 * This used to cache the configurations coming from iwlwifi's way. The data 190 * is cached here so that we can buffer the configuration even if we don't have 191 * a bind from the mei bus and hence, on iwl_mei structure. 192 */ 193 static struct { 194 const struct iwl_mei_ops *ops; 195 struct net_device __rcu *netdev; 196 const struct iwl_sap_notif_connection_info *conn_info; 197 const __le16 *power_limit; 198 u32 rf_kill; 199 u16 mcc; 200 u8 mac_address[6]; 201 u8 nvm_address[6]; 202 void *priv; 203 } iwl_mei_cache = { 204 .rf_kill = SAP_HW_RFKILL_DEASSERTED | SAP_SW_RFKILL_DEASSERTED 205 }; 206 207 static void iwl_mei_free_shared_mem(struct mei_cl_device *cldev) 208 { 209 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 210 211 if (mei_cldev_dma_unmap(cldev)) 212 dev_err(&cldev->dev, "Coudln't unmap the shared mem properly\n"); 213 memset(&mei->shared_mem, 0, sizeof(mei->shared_mem)); 214 } 215 216 #define HBM_DMA_BUF_ID_WLAN 1 217 218 static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev) 219 { 220 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 221 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; 222 223 mem->ctrl = mei_cldev_dma_map(cldev, HBM_DMA_BUF_ID_WLAN, 224 IWL_MEI_SAP_SHARED_MEM_SZ); 225 226 if (IS_ERR(mem->ctrl)) { 227 int ret = PTR_ERR(mem->ctrl); 228 229 dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n", 230 ret); 231 mem->ctrl = NULL; 232 233 return ret; 234 } 235 236 memset(mem->ctrl, 0, IWL_MEI_SAP_SHARED_MEM_SZ); 237 238 return 0; 239 } 240 241 static void iwl_mei_init_shared_mem(struct iwl_mei *mei) 242 { 243 struct iwl_mei_shared_mem_ptrs *mem = &mei->shared_mem; 244 struct iwl_sap_dir *h2m; 245 struct iwl_sap_dir *m2h; 246 int dir, queue; 247 u8 *q_head; 248 249 mem->ctrl->sap_id = cpu_to_le32(SAP_CONTROL_BLOCK_ID); 250 251 mem->ctrl->size = cpu_to_le32(sizeof(*mem->ctrl)); 252 253 h2m = &mem->ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; 254 m2h = &mem->ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; 255 256 h2m->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = 257 cpu_to_le32(SAP_H2M_DATA_Q_SZ); 258 h2m->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = 259 cpu_to_le32(SAP_H2M_NOTIF_Q_SZ); 260 m2h->q_ctrl_blk[SAP_QUEUE_IDX_DATA].size = 261 cpu_to_le32(SAP_M2H_DATA_Q_SZ); 262 m2h->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF].size = 263 cpu_to_le32(SAP_M2H_NOTIF_Q_SZ); 264 265 /* q_head points to the start of the first queue */ 266 q_head = (void *)(mem->ctrl + 1); 267 268 /* Initialize the queue heads */ 269 for (dir = 0; dir < SAP_DIRECTION_MAX; dir++) { 270 for (queue = 0; queue < SAP_QUEUE_IDX_MAX; queue++) { 271 mem->q_head[dir][queue] = q_head; 272 q_head += 273 le32_to_cpu(mem->ctrl->dir[dir].q_ctrl_blk[queue].size); 274 } 275 } 276 277 *(__le32 *)q_head = cpu_to_le32(SAP_CONTROL_BLOCK_ID); 278 } 279 280 static ssize_t iwl_mei_write_cyclic_buf(struct mei_cl_device *cldev, 281 struct iwl_sap_q_ctrl_blk *notif_q, 282 u8 *q_head, 283 const struct iwl_sap_hdr *hdr) 284 { 285 u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); 286 u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); 287 u32 q_sz = le32_to_cpu(notif_q->size); 288 size_t room_in_buf; 289 size_t tx_sz = sizeof(*hdr) + le16_to_cpu(hdr->len); 290 291 if (rd > q_sz || wr > q_sz) { 292 dev_err(&cldev->dev, 293 "Pointers are past the end of the buffer\n"); 294 return -EINVAL; 295 } 296 297 room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr; 298 299 /* we don't have enough room for the data to write */ 300 if (room_in_buf < tx_sz) { 301 dev_err(&cldev->dev, 302 "Not enough room in the buffer\n"); 303 return -ENOSPC; 304 } 305 306 if (wr + tx_sz <= q_sz) { 307 memcpy(q_head + wr, hdr, tx_sz); 308 } else { 309 memcpy(q_head + wr, hdr, q_sz - wr); 310 memcpy(q_head, (u8 *)hdr + q_sz - wr, tx_sz - (q_sz - wr)); 311 } 312 313 WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz)); 314 return 0; 315 } 316 317 static bool iwl_mei_host_to_me_data_pending(const struct iwl_mei *mei) 318 { 319 struct iwl_sap_q_ctrl_blk *notif_q; 320 struct iwl_sap_dir *dir; 321 322 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; 323 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; 324 325 if (READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr)) 326 return true; 327 328 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; 329 return READ_ONCE(notif_q->wr_ptr) != READ_ONCE(notif_q->rd_ptr); 330 } 331 332 static int iwl_mei_send_check_shared_area(struct mei_cl_device *cldev) 333 { 334 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 335 struct iwl_sap_me_msg_start msg = { 336 .hdr.type = cpu_to_le32(SAP_ME_MSG_CHECK_SHARED_AREA), 337 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)), 338 }; 339 int ret; 340 341 lockdep_assert_held(&iwl_mei_mutex); 342 343 if (mei->csa_throttled) 344 return 0; 345 346 trace_iwlmei_me_msg(&msg.hdr, true); 347 ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg)); 348 if (ret != sizeof(msg)) { 349 dev_err(&cldev->dev, 350 "failed to send the SAP_ME_MSG_CHECK_SHARED_AREA message %d\n", 351 ret); 352 return ret; 353 } 354 355 mei->csa_throttled = true; 356 357 schedule_delayed_work(&mei->csa_throttle_end_wk, 358 msecs_to_jiffies(100)); 359 360 return 0; 361 } 362 363 static void iwl_mei_csa_throttle_end_wk(struct work_struct *wk) 364 { 365 struct iwl_mei *mei = 366 container_of(wk, struct iwl_mei, csa_throttle_end_wk.work); 367 368 mutex_lock(&iwl_mei_mutex); 369 370 mei->csa_throttled = false; 371 372 if (iwl_mei_host_to_me_data_pending(mei)) 373 iwl_mei_send_check_shared_area(mei->cldev); 374 375 mutex_unlock(&iwl_mei_mutex); 376 } 377 378 static int iwl_mei_send_sap_msg_payload(struct mei_cl_device *cldev, 379 struct iwl_sap_hdr *hdr) 380 { 381 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 382 struct iwl_sap_q_ctrl_blk *notif_q; 383 struct iwl_sap_dir *dir; 384 void *q_head; 385 int ret; 386 387 lockdep_assert_held(&iwl_mei_mutex); 388 389 if (!mei->shared_mem.ctrl) { 390 dev_err(&cldev->dev, 391 "No shared memory, can't send any SAP message\n"); 392 return -EINVAL; 393 } 394 395 if (!iwl_mei_is_connected()) { 396 dev_err(&cldev->dev, 397 "Can't send a SAP message if we're not connected\n"); 398 return -ENODEV; 399 } 400 401 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); 402 dev_dbg(&cldev->dev, "Sending %d\n", hdr->type); 403 404 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; 405 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; 406 q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_NOTIF]; 407 ret = iwl_mei_write_cyclic_buf(q_head, notif_q, q_head, hdr); 408 409 if (ret < 0) 410 return ret; 411 412 trace_iwlmei_sap_cmd(hdr, true); 413 414 return iwl_mei_send_check_shared_area(cldev); 415 } 416 417 void iwl_mei_add_data_to_ring(struct sk_buff *skb, bool cb_tx) 418 { 419 struct iwl_sap_q_ctrl_blk *notif_q; 420 struct iwl_sap_dir *dir; 421 struct iwl_mei *mei; 422 size_t room_in_buf; 423 size_t tx_sz; 424 size_t hdr_sz; 425 u32 q_sz; 426 u32 rd; 427 u32 wr; 428 void *q_head; 429 430 if (!iwl_mei_global_cldev) 431 return; 432 433 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 434 435 /* 436 * We access this path for Rx packets (the more common case) 437 * and from Tx path when we send DHCP packets, the latter is 438 * very unlikely. 439 * Take the lock already here to make sure we see that remove() 440 * might have cleared the IWL_MEI_STATUS_SAP_CONNECTED bit. 441 */ 442 spin_lock_bh(&mei->data_q_lock); 443 444 if (!iwl_mei_is_connected()) { 445 spin_unlock_bh(&mei->data_q_lock); 446 return; 447 } 448 449 /* 450 * We are in a RCU critical section and the remove from the CSME bus 451 * which would free this memory waits for the readers to complete (this 452 * is done in netdev_rx_handler_unregister). 453 */ 454 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_HOST_TO_ME]; 455 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; 456 q_head = mei->shared_mem.q_head[SAP_DIRECTION_HOST_TO_ME][SAP_QUEUE_IDX_DATA]; 457 458 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); 459 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); 460 q_sz = le32_to_cpu(notif_q->size); 461 hdr_sz = cb_tx ? sizeof(struct iwl_sap_cb_data) : 462 sizeof(struct iwl_sap_hdr); 463 tx_sz = skb->len + hdr_sz; 464 465 if (rd > q_sz || wr > q_sz) { 466 dev_err(&mei->cldev->dev, 467 "can't write the data: pointers are past the end of the buffer\n"); 468 goto out; 469 } 470 471 room_in_buf = wr >= rd ? q_sz - wr + rd : rd - wr; 472 473 /* we don't have enough room for the data to write */ 474 if (room_in_buf < tx_sz) { 475 dev_err(&mei->cldev->dev, 476 "Not enough room in the buffer for this data\n"); 477 goto out; 478 } 479 480 if (skb_headroom(skb) < hdr_sz) { 481 dev_err(&mei->cldev->dev, 482 "Not enough headroom in the skb to write the SAP header\n"); 483 goto out; 484 } 485 486 if (cb_tx) { 487 struct iwl_sap_cb_data *cb_hdr = skb_push(skb, sizeof(*cb_hdr)); 488 489 cb_hdr->hdr.type = cpu_to_le16(SAP_MSG_CB_DATA_PACKET); 490 cb_hdr->hdr.len = cpu_to_le16(skb->len - sizeof(cb_hdr->hdr)); 491 cb_hdr->hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); 492 cb_hdr->to_me_filt_status = cpu_to_le32(BIT(CB_TX_DHCP_FILT_IDX)); 493 cb_hdr->data_len = cpu_to_le32(skb->len - sizeof(*cb_hdr)); 494 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DHCP); 495 } else { 496 struct iwl_sap_hdr *hdr = skb_push(skb, sizeof(*hdr)); 497 498 hdr->type = cpu_to_le16(SAP_MSG_DATA_PACKET); 499 hdr->len = cpu_to_le16(skb->len - sizeof(*hdr)); 500 hdr->seq_num = cpu_to_le32(atomic_inc_return(&mei->sap_seq_no)); 501 trace_iwlmei_sap_data(skb, IWL_SAP_TX_DATA_FROM_AIR); 502 } 503 504 if (wr + tx_sz <= q_sz) { 505 skb_copy_bits(skb, 0, q_head + wr, tx_sz); 506 } else { 507 skb_copy_bits(skb, 0, q_head + wr, q_sz - wr); 508 skb_copy_bits(skb, q_sz - wr, q_head, tx_sz - (q_sz - wr)); 509 } 510 511 WRITE_ONCE(notif_q->wr_ptr, cpu_to_le32((wr + tx_sz) % q_sz)); 512 513 out: 514 spin_unlock_bh(&mei->data_q_lock); 515 } 516 517 static int 518 iwl_mei_send_sap_msg(struct mei_cl_device *cldev, u16 type) 519 { 520 struct iwl_sap_hdr msg = { 521 .type = cpu_to_le16(type), 522 }; 523 524 return iwl_mei_send_sap_msg_payload(cldev, &msg); 525 } 526 527 static void iwl_mei_send_csa_msg_wk(struct work_struct *wk) 528 { 529 struct iwl_mei *mei = 530 container_of(wk, struct iwl_mei, send_csa_msg_wk); 531 532 if (!iwl_mei_is_connected()) 533 return; 534 535 mutex_lock(&iwl_mei_mutex); 536 537 iwl_mei_send_check_shared_area(mei->cldev); 538 539 mutex_unlock(&iwl_mei_mutex); 540 } 541 542 /* Called in a RCU read critical section from netif_receive_skb */ 543 static rx_handler_result_t iwl_mei_rx_handler(struct sk_buff **pskb) 544 { 545 struct sk_buff *skb = *pskb; 546 struct iwl_mei *mei = 547 rcu_dereference(skb->dev->rx_handler_data); 548 struct iwl_mei_filters *filters = rcu_dereference(mei->filters); 549 bool rx_for_csme = false; 550 rx_handler_result_t res; 551 552 /* 553 * remove() unregisters this handler and synchronize_net, so this 554 * should never happen. 555 */ 556 if (!iwl_mei_is_connected()) { 557 dev_err(&mei->cldev->dev, 558 "Got an Rx packet, but we're not connected to SAP?\n"); 559 return RX_HANDLER_PASS; 560 } 561 562 if (filters) 563 res = iwl_mei_rx_filter(skb, &filters->filters, &rx_for_csme); 564 else 565 res = RX_HANDLER_PASS; 566 567 /* 568 * The data is already on the ring of the shared area, all we 569 * need to do is to tell the CSME firmware to check what we have 570 * there. 571 */ 572 if (rx_for_csme) 573 schedule_work(&mei->send_csa_msg_wk); 574 575 if (res != RX_HANDLER_PASS) { 576 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_DROPPED_FROM_AIR); 577 dev_kfree_skb(skb); 578 } 579 580 return res; 581 } 582 583 static void 584 iwl_mei_handle_rx_start_ok(struct mei_cl_device *cldev, 585 const struct iwl_sap_me_msg_start_ok *rsp, 586 ssize_t len) 587 { 588 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 589 590 if (len != sizeof(*rsp)) { 591 dev_err(&cldev->dev, 592 "got invalid SAP_ME_MSG_START_OK from CSME firmware\n"); 593 dev_err(&cldev->dev, 594 "size is incorrect: %zd instead of %zu\n", 595 len, sizeof(*rsp)); 596 return; 597 } 598 599 if (rsp->supported_version != SAP_VERSION) { 600 dev_err(&cldev->dev, 601 "didn't get the expected version: got %d\n", 602 rsp->supported_version); 603 return; 604 } 605 606 mutex_lock(&iwl_mei_mutex); 607 set_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status); 608 /* wifi driver has registered already */ 609 if (iwl_mei_cache.ops) { 610 iwl_mei_send_sap_msg(mei->cldev, 611 SAP_MSG_NOTIF_WIFIDR_UP); 612 iwl_mei_cache.ops->sap_connected(iwl_mei_cache.priv); 613 } 614 615 mutex_unlock(&iwl_mei_mutex); 616 } 617 618 static void iwl_mei_handle_csme_filters(struct mei_cl_device *cldev, 619 const struct iwl_sap_csme_filters *filters) 620 { 621 struct iwl_mei *mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 622 struct iwl_mei_filters *new_filters; 623 struct iwl_mei_filters *old_filters; 624 625 old_filters = 626 rcu_dereference_protected(mei->filters, 627 lockdep_is_held(&iwl_mei_mutex)); 628 629 new_filters = kzalloc(sizeof(*new_filters), GFP_KERNEL); 630 631 /* Copy the OOB filters */ 632 new_filters->filters = filters->filters; 633 634 rcu_assign_pointer(mei->filters, new_filters); 635 636 if (old_filters) 637 kfree_rcu(old_filters, rcu_head); 638 } 639 640 static void 641 iwl_mei_handle_conn_status(struct mei_cl_device *cldev, 642 const struct iwl_sap_notif_conn_status *status) 643 { 644 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 645 struct iwl_mei_conn_info conn_info = { 646 .lp_state = le32_to_cpu(status->link_prot_state), 647 .ssid_len = le32_to_cpu(status->conn_info.ssid_len), 648 .channel = status->conn_info.channel, 649 .band = status->conn_info.band, 650 .auth_mode = le32_to_cpu(status->conn_info.auth_mode), 651 .pairwise_cipher = le32_to_cpu(status->conn_info.pairwise_cipher), 652 }; 653 654 if (!iwl_mei_cache.ops || 655 conn_info.ssid_len > ARRAY_SIZE(conn_info.ssid)) 656 return; 657 658 memcpy(conn_info.ssid, status->conn_info.ssid, conn_info.ssid_len); 659 ether_addr_copy(conn_info.bssid, status->conn_info.bssid); 660 661 iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info); 662 663 /* 664 * Update the Rfkill state in case the host does not own the device: 665 * if we are in Link Protection, ask to not touch the device, else, 666 * unblock rfkill. 667 * If the host owns the device, inform the user space whether it can 668 * roam. 669 */ 670 if (mei->got_ownership) 671 iwl_mei_cache.ops->roaming_forbidden(iwl_mei_cache.priv, 672 status->link_prot_state); 673 else 674 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, 675 status->link_prot_state); 676 } 677 678 static void iwl_mei_set_init_conf(struct iwl_mei *mei) 679 { 680 struct iwl_sap_notif_host_link_up link_msg = { 681 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP), 682 .hdr.len = cpu_to_le16(sizeof(link_msg) - sizeof(link_msg.hdr)), 683 }; 684 struct iwl_sap_notif_country_code mcc_msg = { 685 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE), 686 .hdr.len = cpu_to_le16(sizeof(mcc_msg) - sizeof(mcc_msg.hdr)), 687 .mcc = cpu_to_le16(iwl_mei_cache.mcc), 688 }; 689 struct iwl_sap_notif_sar_limits sar_msg = { 690 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS), 691 .hdr.len = cpu_to_le16(sizeof(sar_msg) - sizeof(sar_msg.hdr)), 692 }; 693 struct iwl_sap_notif_host_nic_info nic_info_msg = { 694 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO), 695 .hdr.len = cpu_to_le16(sizeof(nic_info_msg) - sizeof(nic_info_msg.hdr)), 696 }; 697 struct iwl_sap_msg_dw rfkill_msg = { 698 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE), 699 .hdr.len = cpu_to_le16(sizeof(rfkill_msg) - sizeof(rfkill_msg.hdr)), 700 .val = cpu_to_le32(iwl_mei_cache.rf_kill), 701 }; 702 703 iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WHO_OWNS_NIC); 704 705 if (iwl_mei_cache.conn_info) { 706 link_msg.conn_info = *iwl_mei_cache.conn_info; 707 iwl_mei_send_sap_msg_payload(mei->cldev, &link_msg.hdr); 708 } 709 710 iwl_mei_send_sap_msg_payload(mei->cldev, &mcc_msg.hdr); 711 712 if (iwl_mei_cache.power_limit) { 713 memcpy(sar_msg.sar_chain_info_table, iwl_mei_cache.power_limit, 714 sizeof(sar_msg.sar_chain_info_table)); 715 iwl_mei_send_sap_msg_payload(mei->cldev, &sar_msg.hdr); 716 } 717 718 ether_addr_copy(nic_info_msg.mac_address, iwl_mei_cache.mac_address); 719 ether_addr_copy(nic_info_msg.nvm_address, iwl_mei_cache.nvm_address); 720 iwl_mei_send_sap_msg_payload(mei->cldev, &nic_info_msg.hdr); 721 722 iwl_mei_send_sap_msg_payload(mei->cldev, &rfkill_msg.hdr); 723 } 724 725 static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev, 726 const struct iwl_sap_msg_dw *dw) 727 { 728 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 729 struct net_device *netdev; 730 731 /* 732 * First take rtnl and only then the mutex to avoid an ABBA 733 * with iwl_mei_set_netdev() 734 */ 735 rtnl_lock(); 736 mutex_lock(&iwl_mei_mutex); 737 738 netdev = rcu_dereference_protected(iwl_mei_cache.netdev, 739 lockdep_is_held(&iwl_mei_mutex)); 740 741 if (mei->amt_enabled == !!le32_to_cpu(dw->val)) 742 goto out; 743 744 mei->amt_enabled = dw->val; 745 746 if (mei->amt_enabled) { 747 if (netdev) 748 netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei); 749 750 iwl_mei_set_init_conf(mei); 751 } else { 752 if (iwl_mei_cache.ops) 753 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false); 754 if (netdev) 755 netdev_rx_handler_unregister(netdev); 756 } 757 758 out: 759 mutex_unlock(&iwl_mei_mutex); 760 rtnl_unlock(); 761 } 762 763 static void iwl_mei_handle_nic_owner(struct mei_cl_device *cldev, 764 const struct iwl_sap_msg_dw *dw) 765 { 766 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 767 768 mei->got_ownership = dw->val != cpu_to_le32(SAP_NIC_OWNER_ME); 769 } 770 771 static void iwl_mei_handle_can_release_ownership(struct mei_cl_device *cldev, 772 const void *payload) 773 { 774 /* We can get ownership and driver is registered, go ahead */ 775 if (iwl_mei_cache.ops) 776 iwl_mei_send_sap_msg(cldev, 777 SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP); 778 } 779 780 static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev, 781 const void *payload) 782 { 783 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 784 785 dev_info(&cldev->dev, "CSME takes ownership\n"); 786 787 mei->got_ownership = false; 788 789 /* 790 * Remember to send CSME_OWNERSHIP_CONFIRMED when the wifi driver 791 * is finished taking the device down. 792 */ 793 mei->csme_taking_ownership = true; 794 795 if (iwl_mei_cache.ops) 796 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true); 797 } 798 799 static void iwl_mei_handle_nvm(struct mei_cl_device *cldev, 800 const struct iwl_sap_nvm *sap_nvm) 801 { 802 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 803 const struct iwl_mei_nvm *mei_nvm = (const void *)sap_nvm; 804 int i; 805 806 kfree(mei->nvm); 807 mei->nvm = kzalloc(sizeof(*mei_nvm), GFP_KERNEL); 808 if (!mei->nvm) 809 return; 810 811 ether_addr_copy(mei->nvm->hw_addr, sap_nvm->hw_addr); 812 mei->nvm->n_hw_addrs = sap_nvm->n_hw_addrs; 813 mei->nvm->radio_cfg = le32_to_cpu(sap_nvm->radio_cfg); 814 mei->nvm->caps = le32_to_cpu(sap_nvm->caps); 815 mei->nvm->nvm_version = le32_to_cpu(sap_nvm->nvm_version); 816 817 for (i = 0; i < ARRAY_SIZE(mei->nvm->channels); i++) 818 mei->nvm->channels[i] = le32_to_cpu(sap_nvm->channels[i]); 819 820 wake_up_all(&mei->get_nvm_wq); 821 } 822 823 static void iwl_mei_handle_rx_host_own_req(struct mei_cl_device *cldev, 824 const struct iwl_sap_msg_dw *dw) 825 { 826 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 827 828 /* 829 * This means that we can't use the wifi device right now, CSME is not 830 * ready to let us use it. 831 */ 832 if (!dw->val) { 833 dev_info(&cldev->dev, "Ownership req denied\n"); 834 return; 835 } 836 837 mei->got_ownership = true; 838 wake_up_all(&mei->get_ownership_wq); 839 840 iwl_mei_send_sap_msg(cldev, 841 SAP_MSG_NOTIF_HOST_OWNERSHIP_CONFIRMED); 842 843 /* We can now start the connection, unblock rfkill */ 844 if (iwl_mei_cache.ops) 845 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false); 846 } 847 848 static void iwl_mei_handle_ping(struct mei_cl_device *cldev, 849 const struct iwl_sap_hdr *hdr) 850 { 851 iwl_mei_send_sap_msg(cldev, SAP_MSG_NOTIF_PONG); 852 } 853 854 static void iwl_mei_handle_sap_msg(struct mei_cl_device *cldev, 855 const struct iwl_sap_hdr *hdr) 856 { 857 u16 len = le16_to_cpu(hdr->len) + sizeof(*hdr); 858 u16 type = le16_to_cpu(hdr->type); 859 860 dev_dbg(&cldev->dev, 861 "Got a new SAP message: type %d, len %d, seq %d\n", 862 le16_to_cpu(hdr->type), len, 863 le32_to_cpu(hdr->seq_num)); 864 865 #define SAP_MSG_HANDLER(_cmd, _handler, _sz) \ 866 case SAP_MSG_NOTIF_ ## _cmd: \ 867 if (len < _sz) { \ 868 dev_err(&cldev->dev, \ 869 "Bad size for %d: %u < %u\n", \ 870 le16_to_cpu(hdr->type), \ 871 (unsigned int)len, \ 872 (unsigned int)_sz); \ 873 break; \ 874 } \ 875 mutex_lock(&iwl_mei_mutex); \ 876 _handler(cldev, (const void *)hdr); \ 877 mutex_unlock(&iwl_mei_mutex); \ 878 break 879 880 #define SAP_MSG_HANDLER_NO_LOCK(_cmd, _handler, _sz) \ 881 case SAP_MSG_NOTIF_ ## _cmd: \ 882 if (len < _sz) { \ 883 dev_err(&cldev->dev, \ 884 "Bad size for %d: %u < %u\n", \ 885 le16_to_cpu(hdr->type), \ 886 (unsigned int)len, \ 887 (unsigned int)_sz); \ 888 break; \ 889 } \ 890 _handler(cldev, (const void *)hdr); \ 891 break 892 893 #define SAP_MSG_HANDLER_NO_HANDLER(_cmd, _sz) \ 894 case SAP_MSG_NOTIF_ ## _cmd: \ 895 if (len < _sz) { \ 896 dev_err(&cldev->dev, \ 897 "Bad size for %d: %u < %u\n", \ 898 le16_to_cpu(hdr->type), \ 899 (unsigned int)len, \ 900 (unsigned int)_sz); \ 901 break; \ 902 } \ 903 break 904 905 switch (type) { 906 SAP_MSG_HANDLER(PING, iwl_mei_handle_ping, 0); 907 SAP_MSG_HANDLER(CSME_FILTERS, 908 iwl_mei_handle_csme_filters, 909 sizeof(struct iwl_sap_csme_filters)); 910 SAP_MSG_HANDLER(CSME_CONN_STATUS, 911 iwl_mei_handle_conn_status, 912 sizeof(struct iwl_sap_notif_conn_status)); 913 SAP_MSG_HANDLER_NO_LOCK(AMT_STATE, 914 iwl_mei_handle_amt_state, 915 sizeof(struct iwl_sap_msg_dw)); 916 SAP_MSG_HANDLER_NO_HANDLER(PONG, 0); 917 SAP_MSG_HANDLER(NVM, iwl_mei_handle_nvm, 918 sizeof(struct iwl_sap_nvm)); 919 SAP_MSG_HANDLER(CSME_REPLY_TO_HOST_OWNERSHIP_REQ, 920 iwl_mei_handle_rx_host_own_req, 921 sizeof(struct iwl_sap_msg_dw)); 922 SAP_MSG_HANDLER(NIC_OWNER, iwl_mei_handle_nic_owner, 923 sizeof(struct iwl_sap_msg_dw)); 924 SAP_MSG_HANDLER(CSME_CAN_RELEASE_OWNERSHIP, 925 iwl_mei_handle_can_release_ownership, 0); 926 SAP_MSG_HANDLER(CSME_TAKING_OWNERSHIP, 927 iwl_mei_handle_csme_taking_ownership, 0); 928 default: 929 /* 930 * This is not really an error, there are message that we decided 931 * to ignore, yet, it is useful to be able to leave a note if debug 932 * is enabled. 933 */ 934 dev_dbg(&cldev->dev, "Unsupported message: type %d, len %d\n", 935 le16_to_cpu(hdr->type), len); 936 } 937 938 #undef SAP_MSG_HANDLER 939 #undef SAP_MSG_HANDLER_NO_LOCK 940 } 941 942 static void iwl_mei_read_from_q(const u8 *q_head, u32 q_sz, 943 u32 *_rd, u32 wr, 944 void *_buf, u32 len) 945 { 946 u8 *buf = _buf; 947 u32 rd = *_rd; 948 949 if (rd + len <= q_sz) { 950 memcpy(buf, q_head + rd, len); 951 rd += len; 952 } else { 953 memcpy(buf, q_head + rd, q_sz - rd); 954 memcpy(buf + q_sz - rd, q_head, len - (q_sz - rd)); 955 rd = len - (q_sz - rd); 956 } 957 958 *_rd = rd; 959 } 960 961 #define QOS_HDR_IV_SNAP_LEN (sizeof(struct ieee80211_qos_hdr) + \ 962 IEEE80211_TKIP_IV_LEN + \ 963 sizeof(rfc1042_header) + ETH_TLEN) 964 965 static void iwl_mei_handle_sap_data(struct mei_cl_device *cldev, 966 const u8 *q_head, u32 q_sz, 967 u32 rd, u32 wr, ssize_t valid_rx_sz, 968 struct sk_buff_head *tx_skbs) 969 { 970 struct iwl_sap_hdr hdr; 971 struct net_device *netdev = 972 rcu_dereference_protected(iwl_mei_cache.netdev, 973 lockdep_is_held(&iwl_mei_mutex)); 974 975 if (!netdev) 976 return; 977 978 while (valid_rx_sz >= sizeof(hdr)) { 979 struct ethhdr *ethhdr; 980 unsigned char *data; 981 struct sk_buff *skb; 982 u16 len; 983 984 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, &hdr, sizeof(hdr)); 985 valid_rx_sz -= sizeof(hdr); 986 len = le16_to_cpu(hdr.len); 987 988 if (valid_rx_sz < len) { 989 dev_err(&cldev->dev, 990 "Data queue is corrupted: valid data len %zd, len %d\n", 991 valid_rx_sz, len); 992 break; 993 } 994 995 if (len < sizeof(*ethhdr)) { 996 dev_err(&cldev->dev, 997 "Data len is smaller than an ethernet header? len = %d\n", 998 len); 999 } 1000 1001 valid_rx_sz -= len; 1002 1003 if (le16_to_cpu(hdr.type) != SAP_MSG_DATA_PACKET) { 1004 dev_err(&cldev->dev, "Unsupported Rx data: type %d, len %d\n", 1005 le16_to_cpu(hdr.type), len); 1006 continue; 1007 } 1008 1009 /* We need enough room for the WiFi header + SNAP + IV */ 1010 skb = netdev_alloc_skb(netdev, len + QOS_HDR_IV_SNAP_LEN); 1011 1012 skb_reserve(skb, QOS_HDR_IV_SNAP_LEN); 1013 ethhdr = skb_push(skb, sizeof(*ethhdr)); 1014 1015 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, 1016 ethhdr, sizeof(*ethhdr)); 1017 len -= sizeof(*ethhdr); 1018 1019 skb_reset_mac_header(skb); 1020 skb_reset_network_header(skb); 1021 skb->protocol = ethhdr->h_proto; 1022 1023 data = skb_put(skb, len); 1024 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, data, len); 1025 1026 /* 1027 * Enqueue the skb here so that it can be sent later when we 1028 * do not hold the mutex. TX'ing a packet with a mutex held is 1029 * possible, but it wouldn't be nice to forbid the TX path to 1030 * call any of iwlmei's functions, since every API from iwlmei 1031 * needs the mutex. 1032 */ 1033 __skb_queue_tail(tx_skbs, skb); 1034 } 1035 } 1036 1037 static void iwl_mei_handle_sap_rx_cmd(struct mei_cl_device *cldev, 1038 const u8 *q_head, u32 q_sz, 1039 u32 rd, u32 wr, ssize_t valid_rx_sz) 1040 { 1041 struct page *p = alloc_page(GFP_KERNEL); 1042 struct iwl_sap_hdr *hdr; 1043 1044 if (!p) 1045 return; 1046 1047 hdr = page_address(p); 1048 1049 while (valid_rx_sz >= sizeof(*hdr)) { 1050 u16 len; 1051 1052 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr, sizeof(*hdr)); 1053 valid_rx_sz -= sizeof(*hdr); 1054 len = le16_to_cpu(hdr->len); 1055 1056 if (valid_rx_sz < len) 1057 break; 1058 1059 iwl_mei_read_from_q(q_head, q_sz, &rd, wr, hdr + 1, len); 1060 1061 trace_iwlmei_sap_cmd(hdr, false); 1062 iwl_mei_handle_sap_msg(cldev, hdr); 1063 valid_rx_sz -= len; 1064 } 1065 1066 /* valid_rx_sz must be 0 now... */ 1067 if (valid_rx_sz) 1068 dev_err(&cldev->dev, 1069 "More data in the buffer although we read it all\n"); 1070 1071 __free_page(p); 1072 } 1073 1074 static void iwl_mei_handle_sap_rx(struct mei_cl_device *cldev, 1075 struct iwl_sap_q_ctrl_blk *notif_q, 1076 const u8 *q_head, 1077 struct sk_buff_head *skbs) 1078 { 1079 u32 rd = le32_to_cpu(READ_ONCE(notif_q->rd_ptr)); 1080 u32 wr = le32_to_cpu(READ_ONCE(notif_q->wr_ptr)); 1081 u32 q_sz = le32_to_cpu(notif_q->size); 1082 ssize_t valid_rx_sz; 1083 1084 if (rd > q_sz || wr > q_sz) { 1085 dev_err(&cldev->dev, 1086 "Pointers are past the buffer limit\n"); 1087 return; 1088 } 1089 1090 if (rd == wr) 1091 return; 1092 1093 valid_rx_sz = wr > rd ? wr - rd : q_sz - rd + wr; 1094 1095 if (skbs) 1096 iwl_mei_handle_sap_data(cldev, q_head, q_sz, rd, wr, 1097 valid_rx_sz, skbs); 1098 else 1099 iwl_mei_handle_sap_rx_cmd(cldev, q_head, q_sz, rd, wr, 1100 valid_rx_sz); 1101 1102 /* Increment the read pointer to point to the write pointer */ 1103 WRITE_ONCE(notif_q->rd_ptr, cpu_to_le32(wr)); 1104 } 1105 1106 static void iwl_mei_handle_check_shared_area(struct mei_cl_device *cldev) 1107 { 1108 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 1109 struct iwl_sap_q_ctrl_blk *notif_q; 1110 struct sk_buff_head tx_skbs; 1111 struct iwl_sap_dir *dir; 1112 void *q_head; 1113 1114 if (!mei->shared_mem.ctrl) 1115 return; 1116 1117 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; 1118 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_NOTIF]; 1119 q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_NOTIF]; 1120 1121 /* 1122 * Do not hold the mutex here, but rather each and every message 1123 * handler takes it. 1124 * This allows message handlers to take it at a certain time. 1125 */ 1126 iwl_mei_handle_sap_rx(cldev, notif_q, q_head, NULL); 1127 1128 mutex_lock(&iwl_mei_mutex); 1129 dir = &mei->shared_mem.ctrl->dir[SAP_DIRECTION_ME_TO_HOST]; 1130 notif_q = &dir->q_ctrl_blk[SAP_QUEUE_IDX_DATA]; 1131 q_head = mei->shared_mem.q_head[SAP_DIRECTION_ME_TO_HOST][SAP_QUEUE_IDX_DATA]; 1132 1133 __skb_queue_head_init(&tx_skbs); 1134 1135 iwl_mei_handle_sap_rx(cldev, notif_q, q_head, &tx_skbs); 1136 1137 if (skb_queue_empty(&tx_skbs)) { 1138 mutex_unlock(&iwl_mei_mutex); 1139 return; 1140 } 1141 1142 /* 1143 * Take the RCU read lock before we unlock the mutex to make sure that 1144 * even if the netdev is replaced by another non-NULL netdev right after 1145 * we unlock the mutex, the old netdev will still be valid when we 1146 * transmit the frames. We can't allow to replace the netdev here because 1147 * the skbs hold a pointer to the netdev. 1148 */ 1149 rcu_read_lock(); 1150 1151 mutex_unlock(&iwl_mei_mutex); 1152 1153 if (!rcu_access_pointer(iwl_mei_cache.netdev)) { 1154 dev_err(&cldev->dev, "Can't Tx without a netdev\n"); 1155 skb_queue_purge(&tx_skbs); 1156 goto out; 1157 } 1158 1159 while (!skb_queue_empty(&tx_skbs)) { 1160 struct sk_buff *skb = __skb_dequeue(&tx_skbs); 1161 1162 trace_iwlmei_sap_data(skb, IWL_SAP_RX_DATA_TO_AIR); 1163 dev_queue_xmit(skb); 1164 } 1165 1166 out: 1167 rcu_read_unlock(); 1168 } 1169 1170 static void iwl_mei_rx(struct mei_cl_device *cldev) 1171 { 1172 struct iwl_sap_me_msg_hdr *hdr; 1173 u8 msg[100]; 1174 ssize_t ret; 1175 1176 ret = mei_cldev_recv(cldev, (u8 *)&msg, sizeof(msg)); 1177 if (ret < 0) { 1178 dev_err(&cldev->dev, "failed to receive data: %zd\n", ret); 1179 return; 1180 } 1181 1182 if (ret == 0) { 1183 dev_err(&cldev->dev, "got an empty response\n"); 1184 return; 1185 } 1186 1187 hdr = (void *)msg; 1188 trace_iwlmei_me_msg(hdr, false); 1189 1190 switch (le32_to_cpu(hdr->type)) { 1191 case SAP_ME_MSG_START_OK: 1192 BUILD_BUG_ON(sizeof(struct iwl_sap_me_msg_start_ok) > 1193 sizeof(msg)); 1194 1195 iwl_mei_handle_rx_start_ok(cldev, (void *)msg, ret); 1196 break; 1197 case SAP_ME_MSG_CHECK_SHARED_AREA: 1198 iwl_mei_handle_check_shared_area(cldev); 1199 break; 1200 default: 1201 dev_err(&cldev->dev, "got a RX notification: %d\n", 1202 le32_to_cpu(hdr->type)); 1203 break; 1204 } 1205 } 1206 1207 static int iwl_mei_send_start(struct mei_cl_device *cldev) 1208 { 1209 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 1210 struct iwl_sap_me_msg_start msg = { 1211 .hdr.type = cpu_to_le32(SAP_ME_MSG_START), 1212 .hdr.seq_num = cpu_to_le32(atomic_inc_return(&mei->seq_no)), 1213 .hdr.len = cpu_to_le32(sizeof(msg)), 1214 .supported_versions[0] = SAP_VERSION, 1215 .init_data_seq_num = cpu_to_le16(0x100), 1216 .init_notif_seq_num = cpu_to_le16(0x800), 1217 }; 1218 int ret; 1219 1220 trace_iwlmei_me_msg(&msg.hdr, true); 1221 ret = mei_cldev_send(cldev, (void *)&msg, sizeof(msg)); 1222 if (ret != sizeof(msg)) { 1223 dev_err(&cldev->dev, 1224 "failed to send the SAP_ME_MSG_START message %d\n", 1225 ret); 1226 return ret; 1227 } 1228 1229 return 0; 1230 } 1231 1232 static int iwl_mei_enable(struct mei_cl_device *cldev) 1233 { 1234 int ret; 1235 1236 ret = mei_cldev_enable(cldev); 1237 if (ret < 0) { 1238 dev_err(&cldev->dev, "failed to enable the device: %d\n", ret); 1239 return ret; 1240 } 1241 1242 ret = mei_cldev_register_rx_cb(cldev, iwl_mei_rx); 1243 if (ret) { 1244 dev_err(&cldev->dev, 1245 "failed to register to the rx cb: %d\n", ret); 1246 mei_cldev_disable(cldev); 1247 return ret; 1248 } 1249 1250 return 0; 1251 } 1252 1253 struct iwl_mei_nvm *iwl_mei_get_nvm(void) 1254 { 1255 struct iwl_mei_nvm *nvm = NULL; 1256 struct iwl_mei *mei; 1257 int ret; 1258 1259 mutex_lock(&iwl_mei_mutex); 1260 1261 if (!iwl_mei_is_connected()) 1262 goto out; 1263 1264 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1265 1266 if (!mei) 1267 goto out; 1268 1269 ret = iwl_mei_send_sap_msg(iwl_mei_global_cldev, 1270 SAP_MSG_NOTIF_GET_NVM); 1271 if (ret) 1272 goto out; 1273 1274 mutex_unlock(&iwl_mei_mutex); 1275 1276 ret = wait_event_timeout(mei->get_nvm_wq, mei->nvm, 2 * HZ); 1277 if (!ret) 1278 return NULL; 1279 1280 mutex_lock(&iwl_mei_mutex); 1281 1282 if (!iwl_mei_is_connected()) 1283 goto out; 1284 1285 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1286 1287 if (!mei) 1288 goto out; 1289 1290 if (mei->nvm) 1291 nvm = kmemdup(mei->nvm, sizeof(*mei->nvm), GFP_KERNEL); 1292 1293 out: 1294 mutex_unlock(&iwl_mei_mutex); 1295 return nvm; 1296 } 1297 EXPORT_SYMBOL_GPL(iwl_mei_get_nvm); 1298 1299 int iwl_mei_get_ownership(void) 1300 { 1301 struct iwl_mei *mei; 1302 int ret; 1303 1304 mutex_lock(&iwl_mei_mutex); 1305 1306 /* In case we didn't have a bind */ 1307 if (!iwl_mei_is_connected()) { 1308 ret = 0; 1309 goto out; 1310 } 1311 1312 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1313 1314 if (!mei) { 1315 ret = -ENODEV; 1316 goto out; 1317 } 1318 1319 if (!mei->amt_enabled) { 1320 ret = 0; 1321 goto out; 1322 } 1323 1324 if (mei->got_ownership) { 1325 ret = 0; 1326 goto out; 1327 } 1328 1329 ret = iwl_mei_send_sap_msg(mei->cldev, 1330 SAP_MSG_NOTIF_HOST_ASKS_FOR_NIC_OWNERSHIP); 1331 if (ret) 1332 goto out; 1333 1334 mutex_unlock(&iwl_mei_mutex); 1335 1336 ret = wait_event_timeout(mei->get_ownership_wq, 1337 mei->got_ownership, HZ / 2); 1338 if (!ret) 1339 return -ETIMEDOUT; 1340 1341 mutex_lock(&iwl_mei_mutex); 1342 1343 /* In case we didn't have a bind */ 1344 if (!iwl_mei_is_connected()) { 1345 ret = 0; 1346 goto out; 1347 } 1348 1349 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1350 1351 if (!mei) { 1352 ret = -ENODEV; 1353 goto out; 1354 } 1355 1356 ret = !mei->got_ownership; 1357 1358 out: 1359 mutex_unlock(&iwl_mei_mutex); 1360 return ret; 1361 } 1362 EXPORT_SYMBOL_GPL(iwl_mei_get_ownership); 1363 1364 void iwl_mei_host_associated(const struct iwl_mei_conn_info *conn_info, 1365 const struct iwl_mei_colloc_info *colloc_info) 1366 { 1367 struct iwl_sap_notif_host_link_up msg = { 1368 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_UP), 1369 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1370 .conn_info = { 1371 .ssid_len = cpu_to_le32(conn_info->ssid_len), 1372 .channel = conn_info->channel, 1373 .band = conn_info->band, 1374 .pairwise_cipher = cpu_to_le32(conn_info->pairwise_cipher), 1375 .auth_mode = cpu_to_le32(conn_info->auth_mode), 1376 }, 1377 }; 1378 struct iwl_mei *mei; 1379 1380 if (conn_info->ssid_len > ARRAY_SIZE(msg.conn_info.ssid)) 1381 return; 1382 1383 memcpy(msg.conn_info.ssid, conn_info->ssid, conn_info->ssid_len); 1384 memcpy(msg.conn_info.bssid, conn_info->bssid, ETH_ALEN); 1385 1386 if (colloc_info) { 1387 msg.colloc_channel = colloc_info->channel; 1388 msg.colloc_band = colloc_info->channel <= 14 ? 0 : 1; 1389 memcpy(msg.colloc_bssid, colloc_info->bssid, ETH_ALEN); 1390 } 1391 1392 mutex_lock(&iwl_mei_mutex); 1393 1394 if (!iwl_mei_is_connected()) 1395 goto out; 1396 1397 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1398 1399 if (!mei) 1400 goto out; 1401 1402 if (!mei->amt_enabled) 1403 goto out; 1404 1405 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1406 1407 out: 1408 kfree(iwl_mei_cache.conn_info); 1409 iwl_mei_cache.conn_info = 1410 kmemdup(&msg.conn_info, sizeof(msg.conn_info), GFP_KERNEL); 1411 mutex_unlock(&iwl_mei_mutex); 1412 } 1413 EXPORT_SYMBOL_GPL(iwl_mei_host_associated); 1414 1415 void iwl_mei_host_disassociated(void) 1416 { 1417 struct iwl_mei *mei; 1418 struct iwl_sap_notif_host_link_down msg = { 1419 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_HOST_LINK_DOWN), 1420 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1421 .type = HOST_LINK_DOWN_TYPE_LONG, 1422 }; 1423 1424 mutex_lock(&iwl_mei_mutex); 1425 1426 if (!iwl_mei_is_connected()) 1427 goto out; 1428 1429 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1430 1431 if (!mei) 1432 goto out; 1433 1434 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1435 1436 out: 1437 kfree(iwl_mei_cache.conn_info); 1438 iwl_mei_cache.conn_info = NULL; 1439 mutex_unlock(&iwl_mei_mutex); 1440 } 1441 EXPORT_SYMBOL_GPL(iwl_mei_host_disassociated); 1442 1443 void iwl_mei_set_rfkill_state(bool hw_rfkill, bool sw_rfkill) 1444 { 1445 struct iwl_mei *mei; 1446 u32 rfkill_state = 0; 1447 struct iwl_sap_msg_dw msg = { 1448 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_RADIO_STATE), 1449 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1450 }; 1451 1452 if (!sw_rfkill) 1453 rfkill_state |= SAP_SW_RFKILL_DEASSERTED; 1454 1455 if (!hw_rfkill) 1456 rfkill_state |= SAP_HW_RFKILL_DEASSERTED; 1457 1458 mutex_lock(&iwl_mei_mutex); 1459 1460 if (!iwl_mei_is_connected()) 1461 goto out; 1462 1463 msg.val = cpu_to_le32(rfkill_state); 1464 1465 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1466 1467 if (!mei) 1468 goto out; 1469 1470 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1471 1472 out: 1473 iwl_mei_cache.rf_kill = rfkill_state; 1474 mutex_unlock(&iwl_mei_mutex); 1475 } 1476 EXPORT_SYMBOL_GPL(iwl_mei_set_rfkill_state); 1477 1478 void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_address) 1479 { 1480 struct iwl_mei *mei; 1481 struct iwl_sap_notif_host_nic_info msg = { 1482 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_NIC_INFO), 1483 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1484 }; 1485 1486 mutex_lock(&iwl_mei_mutex); 1487 1488 if (!iwl_mei_is_connected()) 1489 goto out; 1490 1491 ether_addr_copy(msg.mac_address, mac_address); 1492 ether_addr_copy(msg.nvm_address, nvm_address); 1493 1494 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1495 1496 if (!mei) 1497 goto out; 1498 1499 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1500 1501 out: 1502 ether_addr_copy(iwl_mei_cache.mac_address, mac_address); 1503 ether_addr_copy(iwl_mei_cache.nvm_address, nvm_address); 1504 mutex_unlock(&iwl_mei_mutex); 1505 } 1506 EXPORT_SYMBOL_GPL(iwl_mei_set_nic_info); 1507 1508 void iwl_mei_set_country_code(u16 mcc) 1509 { 1510 struct iwl_mei *mei; 1511 struct iwl_sap_notif_country_code msg = { 1512 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_COUNTRY_CODE), 1513 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1514 .mcc = cpu_to_le16(mcc), 1515 }; 1516 1517 mutex_lock(&iwl_mei_mutex); 1518 1519 if (!iwl_mei_is_connected()) 1520 goto out; 1521 1522 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1523 1524 if (!mei) 1525 goto out; 1526 1527 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1528 1529 out: 1530 iwl_mei_cache.mcc = mcc; 1531 mutex_unlock(&iwl_mei_mutex); 1532 } 1533 EXPORT_SYMBOL_GPL(iwl_mei_set_country_code); 1534 1535 void iwl_mei_set_power_limit(const __le16 *power_limit) 1536 { 1537 struct iwl_mei *mei; 1538 struct iwl_sap_notif_sar_limits msg = { 1539 .hdr.type = cpu_to_le16(SAP_MSG_NOTIF_SAR_LIMITS), 1540 .hdr.len = cpu_to_le16(sizeof(msg) - sizeof(msg.hdr)), 1541 }; 1542 1543 mutex_lock(&iwl_mei_mutex); 1544 1545 if (!iwl_mei_is_connected()) 1546 goto out; 1547 1548 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1549 1550 if (!mei) 1551 goto out; 1552 1553 memcpy(msg.sar_chain_info_table, power_limit, sizeof(msg.sar_chain_info_table)); 1554 1555 iwl_mei_send_sap_msg_payload(mei->cldev, &msg.hdr); 1556 1557 out: 1558 kfree(iwl_mei_cache.power_limit); 1559 iwl_mei_cache.power_limit = kmemdup(power_limit, 1560 sizeof(msg.sar_chain_info_table), GFP_KERNEL); 1561 mutex_unlock(&iwl_mei_mutex); 1562 } 1563 EXPORT_SYMBOL_GPL(iwl_mei_set_power_limit); 1564 1565 void iwl_mei_set_netdev(struct net_device *netdev) 1566 { 1567 struct iwl_mei *mei; 1568 1569 mutex_lock(&iwl_mei_mutex); 1570 1571 if (!iwl_mei_is_connected()) { 1572 rcu_assign_pointer(iwl_mei_cache.netdev, netdev); 1573 goto out; 1574 } 1575 1576 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1577 1578 if (!mei) 1579 goto out; 1580 1581 if (!netdev) { 1582 struct net_device *dev = 1583 rcu_dereference_protected(iwl_mei_cache.netdev, 1584 lockdep_is_held(&iwl_mei_mutex)); 1585 1586 if (!dev) 1587 goto out; 1588 1589 netdev_rx_handler_unregister(dev); 1590 } 1591 1592 rcu_assign_pointer(iwl_mei_cache.netdev, netdev); 1593 1594 if (netdev && mei->amt_enabled) 1595 netdev_rx_handler_register(netdev, iwl_mei_rx_handler, mei); 1596 1597 out: 1598 mutex_unlock(&iwl_mei_mutex); 1599 } 1600 EXPORT_SYMBOL_GPL(iwl_mei_set_netdev); 1601 1602 void iwl_mei_device_down(void) 1603 { 1604 struct iwl_mei *mei; 1605 1606 mutex_lock(&iwl_mei_mutex); 1607 1608 if (!iwl_mei_is_connected()) 1609 goto out; 1610 1611 mei = mei_cldev_get_drvdata(iwl_mei_global_cldev); 1612 1613 if (!mei) 1614 goto out; 1615 1616 if (!mei->csme_taking_ownership) 1617 goto out; 1618 1619 iwl_mei_send_sap_msg(mei->cldev, 1620 SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED); 1621 mei->csme_taking_ownership = false; 1622 out: 1623 mutex_unlock(&iwl_mei_mutex); 1624 } 1625 EXPORT_SYMBOL_GPL(iwl_mei_device_down); 1626 1627 int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops) 1628 { 1629 int ret; 1630 1631 /* 1632 * We must have a non-NULL priv pointer to not crash when there are 1633 * multiple WiFi devices. 1634 */ 1635 if (!priv) 1636 return -EINVAL; 1637 1638 mutex_lock(&iwl_mei_mutex); 1639 1640 /* do not allow registration if someone else already registered */ 1641 if (iwl_mei_cache.priv || iwl_mei_cache.ops) { 1642 ret = -EBUSY; 1643 goto out; 1644 } 1645 1646 iwl_mei_cache.priv = priv; 1647 iwl_mei_cache.ops = ops; 1648 1649 if (iwl_mei_global_cldev) { 1650 struct iwl_mei *mei = 1651 mei_cldev_get_drvdata(iwl_mei_global_cldev); 1652 1653 /* we have already a SAP connection */ 1654 if (iwl_mei_is_connected()) 1655 iwl_mei_send_sap_msg(mei->cldev, 1656 SAP_MSG_NOTIF_WIFIDR_UP); 1657 } 1658 ret = 0; 1659 1660 out: 1661 mutex_unlock(&iwl_mei_mutex); 1662 return ret; 1663 } 1664 EXPORT_SYMBOL_GPL(iwl_mei_register); 1665 1666 void iwl_mei_start_unregister(void) 1667 { 1668 mutex_lock(&iwl_mei_mutex); 1669 1670 /* At this point, the wifi driver should have removed the netdev */ 1671 if (rcu_access_pointer(iwl_mei_cache.netdev)) 1672 pr_err("Still had a netdev pointer set upon unregister\n"); 1673 1674 kfree(iwl_mei_cache.conn_info); 1675 iwl_mei_cache.conn_info = NULL; 1676 kfree(iwl_mei_cache.power_limit); 1677 iwl_mei_cache.power_limit = NULL; 1678 iwl_mei_cache.ops = NULL; 1679 /* leave iwl_mei_cache.priv non-NULL to prevent any new registration */ 1680 1681 mutex_unlock(&iwl_mei_mutex); 1682 } 1683 EXPORT_SYMBOL_GPL(iwl_mei_start_unregister); 1684 1685 void iwl_mei_unregister_complete(void) 1686 { 1687 mutex_lock(&iwl_mei_mutex); 1688 1689 iwl_mei_cache.priv = NULL; 1690 1691 if (iwl_mei_global_cldev) { 1692 struct iwl_mei *mei = 1693 mei_cldev_get_drvdata(iwl_mei_global_cldev); 1694 1695 iwl_mei_send_sap_msg(mei->cldev, SAP_MSG_NOTIF_WIFIDR_DOWN); 1696 } 1697 1698 mutex_unlock(&iwl_mei_mutex); 1699 } 1700 EXPORT_SYMBOL_GPL(iwl_mei_unregister_complete); 1701 1702 #if IS_ENABLED(CONFIG_DEBUG_FS) 1703 1704 static ssize_t 1705 iwl_mei_dbgfs_send_start_message_write(struct file *file, 1706 const char __user *user_buf, 1707 size_t count, loff_t *ppos) 1708 { 1709 int ret; 1710 1711 mutex_lock(&iwl_mei_mutex); 1712 1713 if (!iwl_mei_global_cldev) { 1714 ret = -ENODEV; 1715 goto out; 1716 } 1717 1718 ret = iwl_mei_send_start(iwl_mei_global_cldev); 1719 1720 out: 1721 mutex_unlock(&iwl_mei_mutex); 1722 return ret ?: count; 1723 } 1724 1725 static const struct file_operations iwl_mei_dbgfs_send_start_message_ops = { 1726 .write = iwl_mei_dbgfs_send_start_message_write, 1727 .open = simple_open, 1728 .llseek = default_llseek, 1729 }; 1730 1731 static ssize_t iwl_mei_dbgfs_req_ownership_write(struct file *file, 1732 const char __user *user_buf, 1733 size_t count, loff_t *ppos) 1734 { 1735 iwl_mei_get_ownership(); 1736 1737 return count; 1738 } 1739 1740 static const struct file_operations iwl_mei_dbgfs_req_ownership_ops = { 1741 .write = iwl_mei_dbgfs_req_ownership_write, 1742 .open = simple_open, 1743 .llseek = default_llseek, 1744 }; 1745 1746 static void iwl_mei_dbgfs_register(struct iwl_mei *mei) 1747 { 1748 mei->dbgfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); 1749 1750 if (!mei->dbgfs_dir) 1751 return; 1752 1753 debugfs_create_ulong("status", S_IRUSR, 1754 mei->dbgfs_dir, &iwl_mei_status); 1755 debugfs_create_file("send_start_message", S_IWUSR, mei->dbgfs_dir, 1756 mei, &iwl_mei_dbgfs_send_start_message_ops); 1757 debugfs_create_file("req_ownserhip", S_IWUSR, mei->dbgfs_dir, 1758 mei, &iwl_mei_dbgfs_req_ownership_ops); 1759 } 1760 1761 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) 1762 { 1763 debugfs_remove_recursive(mei->dbgfs_dir); 1764 mei->dbgfs_dir = NULL; 1765 } 1766 1767 #else 1768 1769 static void iwl_mei_dbgfs_register(struct iwl_mei *mei) {} 1770 static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {} 1771 1772 #endif /* CONFIG_DEBUG_FS */ 1773 1774 /** 1775 * iwl_mei_probe - the probe function called by the mei bus enumeration 1776 * 1777 * This allocates the data needed by iwlmei and sets a pointer to this data 1778 * into the mei_cl_device's drvdata. 1779 * It starts the SAP protocol by sending the SAP_ME_MSG_START without 1780 * waiting for the answer. The answer will be caught later by the Rx callback. 1781 */ 1782 static int iwl_mei_probe(struct mei_cl_device *cldev, 1783 const struct mei_cl_device_id *id) 1784 { 1785 struct iwl_mei *mei; 1786 int ret; 1787 1788 mei = devm_kzalloc(&cldev->dev, sizeof(*mei), GFP_KERNEL); 1789 if (!mei) 1790 return -ENOMEM; 1791 1792 init_waitqueue_head(&mei->get_nvm_wq); 1793 INIT_WORK(&mei->send_csa_msg_wk, iwl_mei_send_csa_msg_wk); 1794 INIT_DELAYED_WORK(&mei->csa_throttle_end_wk, 1795 iwl_mei_csa_throttle_end_wk); 1796 init_waitqueue_head(&mei->get_ownership_wq); 1797 spin_lock_init(&mei->data_q_lock); 1798 1799 mei_cldev_set_drvdata(cldev, mei); 1800 mei->cldev = cldev; 1801 1802 ret = iwl_mei_alloc_shared_mem(cldev); 1803 if (ret) 1804 goto free; 1805 1806 iwl_mei_init_shared_mem(mei); 1807 1808 ret = iwl_mei_enable(cldev); 1809 if (ret) 1810 goto free_shared_mem; 1811 1812 iwl_mei_dbgfs_register(mei); 1813 1814 /* 1815 * We now have a Rx function in place, start the SAP procotol 1816 * we expect to get the SAP_ME_MSG_START_OK response later on. 1817 */ 1818 mutex_lock(&iwl_mei_mutex); 1819 ret = iwl_mei_send_start(cldev); 1820 mutex_unlock(&iwl_mei_mutex); 1821 if (ret) 1822 goto debugfs_unregister; 1823 1824 /* must be last */ 1825 iwl_mei_global_cldev = cldev; 1826 1827 return 0; 1828 1829 debugfs_unregister: 1830 iwl_mei_dbgfs_unregister(mei); 1831 mei_cldev_disable(cldev); 1832 free_shared_mem: 1833 iwl_mei_free_shared_mem(cldev); 1834 free: 1835 mei_cldev_set_drvdata(cldev, NULL); 1836 devm_kfree(&cldev->dev, mei); 1837 1838 return ret; 1839 } 1840 1841 #define SEND_SAP_MAX_WAIT_ITERATION 10 1842 1843 static void iwl_mei_remove(struct mei_cl_device *cldev) 1844 { 1845 struct iwl_mei *mei = mei_cldev_get_drvdata(cldev); 1846 int i; 1847 1848 /* 1849 * We are being removed while the bus is active, it means we are 1850 * going to suspend/ shutdown, so the NIC will disappear. 1851 */ 1852 if (mei_cldev_enabled(cldev) && iwl_mei_cache.ops) 1853 iwl_mei_cache.ops->nic_stolen(iwl_mei_cache.priv); 1854 1855 if (rcu_access_pointer(iwl_mei_cache.netdev)) { 1856 struct net_device *dev; 1857 1858 /* 1859 * First take rtnl and only then the mutex to avoid an ABBA 1860 * with iwl_mei_set_netdev() 1861 */ 1862 rtnl_lock(); 1863 mutex_lock(&iwl_mei_mutex); 1864 1865 /* 1866 * If we are suspending and the wifi driver hasn't removed it's netdev 1867 * yet, do it now. In any case, don't change the cache.netdev pointer. 1868 */ 1869 dev = rcu_dereference_protected(iwl_mei_cache.netdev, 1870 lockdep_is_held(&iwl_mei_mutex)); 1871 1872 netdev_rx_handler_unregister(dev); 1873 mutex_unlock(&iwl_mei_mutex); 1874 rtnl_unlock(); 1875 } 1876 1877 mutex_lock(&iwl_mei_mutex); 1878 1879 /* 1880 * Tell CSME that we are going down so that it won't access the 1881 * memory anymore, make sure this message goes through immediately. 1882 */ 1883 mei->csa_throttled = false; 1884 iwl_mei_send_sap_msg(mei->cldev, 1885 SAP_MSG_NOTIF_HOST_GOES_DOWN); 1886 1887 for (i = 0; i < SEND_SAP_MAX_WAIT_ITERATION; i++) { 1888 if (!iwl_mei_host_to_me_data_pending(mei)) 1889 break; 1890 1891 msleep(5); 1892 } 1893 1894 /* 1895 * If we couldn't make sure that CSME saw the HOST_GOES_DOWN message, 1896 * it means that it will probably keep reading memory that we are going 1897 * to unmap and free, expect IOMMU error messages. 1898 */ 1899 if (i == SEND_SAP_MAX_WAIT_ITERATION) 1900 dev_err(&mei->cldev->dev, 1901 "Couldn't get ACK from CSME on HOST_GOES_DOWN message\n"); 1902 1903 mutex_unlock(&iwl_mei_mutex); 1904 1905 /* 1906 * This looks strange, but this lock is taken here to make sure that 1907 * iwl_mei_add_data_to_ring called from the Tx path sees that we 1908 * clear the IWL_MEI_STATUS_SAP_CONNECTED bit. 1909 * Rx isn't a problem because the rx_handler can't be called after 1910 * having been unregistered. 1911 */ 1912 spin_lock_bh(&mei->data_q_lock); 1913 clear_bit(IWL_MEI_STATUS_SAP_CONNECTED, &iwl_mei_status); 1914 spin_unlock_bh(&mei->data_q_lock); 1915 1916 if (iwl_mei_cache.ops) 1917 iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false); 1918 1919 /* 1920 * mei_cldev_disable will return only after all the MEI Rx is done. 1921 * It must be called when iwl_mei_mutex is *not* held, since it waits 1922 * for our Rx handler to complete. 1923 * After it returns, no new Rx will start. 1924 */ 1925 mei_cldev_disable(cldev); 1926 1927 /* 1928 * Since the netdev was already removed and the netdev's removal 1929 * includes a call to synchronize_net() so that we know there won't be 1930 * any new Rx that will trigger the following workers. 1931 */ 1932 cancel_work_sync(&mei->send_csa_msg_wk); 1933 cancel_delayed_work_sync(&mei->csa_throttle_end_wk); 1934 1935 /* 1936 * If someone waits for the ownership, let him know that we are going 1937 * down and that we are not connected anymore. He'll be able to take 1938 * the device. 1939 */ 1940 wake_up_all(&mei->get_ownership_wq); 1941 1942 mutex_lock(&iwl_mei_mutex); 1943 1944 iwl_mei_global_cldev = NULL; 1945 1946 wake_up_all(&mei->get_nvm_wq); 1947 1948 iwl_mei_free_shared_mem(cldev); 1949 1950 iwl_mei_dbgfs_unregister(mei); 1951 1952 mei_cldev_set_drvdata(cldev, NULL); 1953 1954 kfree(mei->nvm); 1955 1956 kfree(rcu_access_pointer(mei->filters)); 1957 1958 devm_kfree(&cldev->dev, mei); 1959 1960 mutex_unlock(&iwl_mei_mutex); 1961 } 1962 1963 static const struct mei_cl_device_id iwl_mei_tbl[] = { 1964 { KBUILD_MODNAME, MEI_WLAN_UUID, MEI_CL_VERSION_ANY}, 1965 1966 /* required last entry */ 1967 { } 1968 }; 1969 1970 /* 1971 * Do not export the device table because this module is loaded by 1972 * iwlwifi's dependency. 1973 */ 1974 1975 static struct mei_cl_driver iwl_mei_cl_driver = { 1976 .id_table = iwl_mei_tbl, 1977 .name = KBUILD_MODNAME, 1978 .probe = iwl_mei_probe, 1979 .remove = iwl_mei_remove, 1980 }; 1981 1982 module_mei_cl_driver(iwl_mei_cl_driver); 1983