// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014 Broadcom Corporation
 */

/*******************************************************************************
 * Communicates with the dongle by using dcmd codes.
 * For certain dcmd codes, the dongle interprets string data from the host.
 ******************************************************************************/

#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include <brcmu_utils.h>
#include <brcmu_wifi.h>

#include "core.h"
#include "debug.h"
#include "proto.h"
#include "msgbuf.h"
#include "commonring.h"
#include "flowring.h"
#include "bus.h"
#include "tracepoint.h"


#define MSGBUF_IOCTL_RESP_TIMEOUT		msecs_to_jiffies(2000)

#define MSGBUF_TYPE_GEN_STATUS			0x1
#define MSGBUF_TYPE_RING_STATUS			0x2
#define MSGBUF_TYPE_FLOW_RING_CREATE		0x3
#define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT	0x4
#define MSGBUF_TYPE_FLOW_RING_DELETE		0x5
#define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT	0x6
#define MSGBUF_TYPE_FLOW_RING_FLUSH		0x7
#define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT	0x8
#define MSGBUF_TYPE_IOCTLPTR_REQ		0x9
#define MSGBUF_TYPE_IOCTLPTR_REQ_ACK		0xA
#define MSGBUF_TYPE_IOCTLRESP_BUF_POST		0xB
#define MSGBUF_TYPE_IOCTL_CMPLT			0xC
#define MSGBUF_TYPE_EVENT_BUF_POST		0xD
#define MSGBUF_TYPE_WL_EVENT			0xE
#define MSGBUF_TYPE_TX_POST			0xF
#define MSGBUF_TYPE_TX_STATUS			0x10
#define MSGBUF_TYPE_RXBUF_POST			0x11
#define MSGBUF_TYPE_RX_CMPLT			0x12
#define MSGBUF_TYPE_LPBK_DMAXFER		0x13
#define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT		0x14

#define NR_TX_PKTIDS				2048
#define NR_RX_PKTIDS				1024

#define BRCMF_IOCTL_REQ_PKTID			0xFFFE

#define BRCMF_MSGBUF_MAX_PKT_SIZE		2048
#define BRCMF_MSGBUF_MAX_CTL_PKT_SIZE		8192
#define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD	32
#define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST	8
#define BRCMF_MSGBUF_MAX_EVENTBUF_POST		8

#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3	0x01
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11	0x02
#define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK	0x07
#define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT	5

#define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
#define BRCMF_MSGBUF_TX_FLUSH_CNT2		96

#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	96
#define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32
#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS		48

#define BRCMF_MAX_TXSTATUS_WAIT_RETRIES		10

struct msgbuf_common_hdr {
	u8				msgtype;
	u8				ifidx;
	u8				flags;
	u8				rsvd0;
	__le32				request_id;
};

struct msgbuf_ioctl_req_hdr {
	struct msgbuf_common_hdr	msg;
	__le32				cmd;
	__le16				trans_id;
	__le16				input_buf_len;
	__le16				output_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		req_buf_addr;
	__le32				rsvd1[2];
};

struct msgbuf_tx_msghdr {
	struct msgbuf_common_hdr	msg;
	u8				txhdr[ETH_HLEN];
	u8				flags;
	u8				seg_cnt;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
	__le16				metadata_buf_len;
	__le16				data_len;
	__le32				rsvd0;
};

struct msgbuf_rx_bufpost {
	struct msgbuf_common_hdr	msg;
	__le16				metadata_buf_len;
	__le16				data_buf_len;
	__le32				rsvd0;
	struct msgbuf_buf_addr		metadata_buf_addr;
	struct msgbuf_buf_addr		data_buf_addr;
};

struct msgbuf_rx_ioctl_resp_or_event {
	struct msgbuf_common_hdr	msg;
	__le16				host_buf_len;
	__le16				rsvd0[3];
	struct msgbuf_buf_addr		host_buf_addr;
	__le32				rsvd1[4];
};

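/* Completion header carried by all device-to-host completion messages;
 * status holds the firmware result code (0 on success) and flow_ring_id
 * identifies the originating ring where applicable.
 */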
struct msgbuf_completion_hdr {
	__le16				status;
	__le16				flow_ring_id;
};

/* Data struct for the MSGBUF_TYPE_GEN_STATUS */
struct msgbuf_gen_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				write_idx;
	__le32				rsvd0[3];
};

/* Data struct for the MSGBUF_TYPE_RING_STATUS */
struct msgbuf_ring_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				write_idx;
	__le16				rsvd0[5];
};

struct msgbuf_rx_event {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				event_data_len;
	__le16				seqnum;
	__le16				rsvd0[4];
};

struct msgbuf_ioctl_resp_hdr {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				resp_len;
	__le16				trans_id;
	__le32				cmd;
	__le32				rsvd0;
};

struct msgbuf_tx_status {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				tx_status;
};

struct msgbuf_rx_complete {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le16				metadata_len;
	__le16				data_len;
	__le16				data_offset;
	__le16				flags;
	__le32				rx_status_0;
	__le32				rx_status_1;
	__le32				rsvd0;
};

struct msgbuf_tx_flowring_create_req {
	struct msgbuf_common_hdr	msg;
	u8				da[ETH_ALEN];
	u8				sa[ETH_ALEN];
	u8				tid;
	u8				if_flags;
	__le16				flow_ring_id;
	u8				tc;
	u8				priority;
	__le16				int_vector;
	__le16				max_items;
	__le16				len_item;
	struct msgbuf_buf_addr		flow_ring_addr;
};

struct msgbuf_tx_flowring_delete_req {
	struct msgbuf_common_hdr	msg;
	__le16				flow_ring_id;
	__le16				reason;
	__le32				rsvd0[7];
};

struct msgbuf_flowring_create_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_delete_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct msgbuf_flowring_flush_resp {
	struct msgbuf_common_hdr	msg;
	struct msgbuf_completion_hdr	compl_hdr;
	__le32				rsvd0[3];
};

struct brcmf_msgbuf_work_item {
	struct list_head	queue;
	u32			flowid;
	int			ifidx;
	u8			sa[ETH_ALEN];
	u8			da[ETH_ALEN];
};

struct brcmf_msgbuf {
	struct brcmf_pub		*drvr;

	struct brcmf_commonring		**commonrings;
	struct brcmf_commonring		**flowrings;
	dma_addr_t			*flowring_dma_handle;

	u16				max_flowrings;
	u16				max_submissionrings;
	u16				max_completionrings;

	u16				rx_dataoffset;
	u32				max_rxbufpost;
	u16				rx_metadata_offset;
	u32				rxbufpost;

	u32				max_ioctlrespbuf;
	u32				cur_ioctlrespbuf;
	u32				max_eventbuf;
	u32				cur_eventbuf;

	void				*ioctbuf;
	dma_addr_t			ioctbuf_handle;
	u32				ioctbuf_phys_hi;
	u32				ioctbuf_phys_lo;
	int				ioctl_resp_status;
	u32				ioctl_resp_ret_len;
	u32				ioctl_resp_pktid;

	u16				data_seq_no;
	u16				ioctl_seq_no;
	u32				reqid;
	wait_queue_head_t		ioctl_resp_wait;
	bool				ctl_completed;

	struct brcmf_msgbuf_pktids	*tx_pktids;
	struct brcmf_msgbuf_pktids	*rx_pktids;
	struct brcmf_flowring		*flow;

	struct workqueue_struct		*txflow_wq;
	struct work_struct		txflow_work;
	unsigned long			*flow_map;
	unsigned long			*txstatus_done_map;

	struct work_struct		flowring_work;
	spinlock_t			flowring_work_lock;
	struct list_head		work_queue;
};

struct brcmf_msgbuf_pktid {
	atomic_t		allocated;
	u16			data_offset;
	struct sk_buff		*skb;
	dma_addr_t		physaddr;
};

struct brcmf_msgbuf_pktids {
	u32			array_size;
	u32			last_allocated_idx;
	enum dma_data_direction	direction;
	struct brcmf_msgbuf_pktid *array;
};

static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);


static struct brcmf_msgbuf_pktids *
brcmf_msgbuf_init_pktids(u32 nr_array_entries,
			 enum dma_data_direction direction)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktids *pktids;

	array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;

	pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
	if (!pktids) {
		kfree(array);
		return NULL;
	}
	pktids->array = array;
	pktids->array_size = nr_array_entries;
	/* record the DMA direction; every map/unmap below depends on it */
	pktids->direction = direction;

	return pktids;
}


static int
brcmf_msgbuf_alloc_pktid(struct device *dev,
			 struct brcmf_msgbuf_pktids *pktids,
			 struct sk_buff *skb, u16 data_offset,
			 dma_addr_t *physaddr, u32 *idx)
{
	struct brcmf_msgbuf_pktid *array;
	u32 count;

	array = pktids->array;

	*physaddr = dma_map_single(dev, skb->data + data_offset,
				   skb->len - data_offset, pktids->direction);

	if (dma_mapping_error(dev, *physaddr)) {
		brcmf_err("dma_map_single failed !!\n");
		return -ENOMEM;
	}

	*idx = pktids->last_allocated_idx;

	count = 0;
	do {
		(*idx)++;
		if (*idx == pktids->array_size)
			*idx = 0;
		if (array[*idx].allocated.counter == 0)
			if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
				break;
		count++;
	} while (count < pktids->array_size);

	if (count == pktids->array_size) {
		dma_unmap_single(dev, *physaddr, skb->len - data_offset,
				 pktids->direction);
		return -ENOMEM;
	}

	array[*idx].data_offset = data_offset;
	array[*idx].physaddr = *physaddr;
	array[*idx].skb = skb;

	pktids->last_allocated_idx = *idx;

	return 0;
}


static struct sk_buff *
brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
		       u32 idx)
{
	struct brcmf_msgbuf_pktid *pktid;
	struct sk_buff *skb;

	if (idx >= pktids->array_size) {
		brcmf_err("Invalid packet id %d (max %d)\n", idx,
			  pktids->array_size);
		return NULL;
	}
	if (pktids->array[idx].allocated.counter) {
		pktid = &pktids->array[idx];
		dma_unmap_single(dev, pktid->physaddr,
				 pktid->skb->len - pktid->data_offset,
				 pktids->direction);
		skb = pktid->skb;
		pktid->allocated.counter = 0;
		return skb;
	} else {
		brcmf_err("Invalid packet id %d (not in use)\n", idx);
	}

	return NULL;
}


static void
brcmf_msgbuf_release_array(struct device *dev,
			   struct brcmf_msgbuf_pktids *pktids)
{
	struct brcmf_msgbuf_pktid *array;
	struct brcmf_msgbuf_pktid *pktid;
	u32 count;

	array = pktids->array;
	count = 0;
	do {
		if (array[count].allocated.counter) {
			pktid = &array[count];
			dma_unmap_single(dev, pktid->physaddr,
					 pktid->skb->len - pktid->data_offset,
					 pktids->direction);
			brcmu_pkt_buf_free_skb(pktid->skb);
		}
		count++;
	} while (count < pktids->array_size);

	kfree(array);
	kfree(pktids);
}


static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
{
	if (msgbuf->rx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->rx_pktids);
	if (msgbuf->tx_pktids)
		brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
					   msgbuf->tx_pktids);
}

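/* Submit an ioctl request on the control submit ring. The payload is
 * copied into the pre-allocated coherent ioctbuf whose DMA address is
 * passed to the firmware in the request header.
 */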
static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	struct msgbuf_ioctl_req_hdr *request;
	u16 buf_len;
	void *ret_ptr;
	int err;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return -ENOMEM;
	}

	msgbuf->reqid++;

	request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
	request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	request->msg.ifidx = (u8)ifidx;
	request->msg.flags = 0;
	request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
	request->cmd = cpu_to_le32(cmd);
	request->output_buf_len = cpu_to_le16(len);
	request->trans_id = cpu_to_le16(msgbuf->reqid);

	buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
	request->input_buf_len = cpu_to_le16(buf_len);
	request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
	request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
	if (buf)
		memcpy(msgbuf->ioctbuf, buf, buf_len);
	else
		memset(msgbuf->ioctbuf, 0, buf_len);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);

	return err;
}


static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
{
	return wait_event_timeout(msgbuf->ioctl_resp_wait,
				  msgbuf->ctl_completed,
				  MSGBUF_IOCTL_RESP_TIMEOUT);
}


static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
{
	msgbuf->ctl_completed = true;
	wake_up(&msgbuf->ioctl_resp_wait);
}

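/* Issue a dcmd and block until the firmware completion arrives or
 * MSGBUF_IOCTL_RESP_TIMEOUT expires; the posted response buffer is
 * looked up by pktid and copied back to the caller's buffer.
 */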
static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
				   uint cmd, void *buf, uint len, int *fwerr)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct sk_buff *skb = NULL;
	int timeout;
	int err;

	brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
	*fwerr = 0;
	msgbuf->ctl_completed = false;
	err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
	if (err)
		return err;

	timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
	if (!timeout) {
		bphy_err(drvr, "Timeout on response for query command\n");
		return -EIO;
	}

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids,
				     msgbuf->ioctl_resp_pktid);
	if (msgbuf->ioctl_resp_ret_len != 0) {
		if (!skb)
			return -EBADF;

		memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
				       len : msgbuf->ioctl_resp_ret_len);
	}
	brcmu_pkt_buf_free_skb(skb);

	*fwerr = msgbuf->ioctl_resp_status;
	return 0;
}


static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
				 uint cmd, void *buf, uint len, int *fwerr)
{
	return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
}


static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
				struct sk_buff *skb, struct brcmf_if **ifp)
{
	return -ENODEV;
}

static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
}

static void
brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	u32 dma_sz;
	void *dma_buf;

	brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);

	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = msgbuf->flowrings[flowid]->buf_addr;
	dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
			  msgbuf->flowring_dma_handle[flowid]);

	brcmf_flowring_delete(msgbuf->flow, flowid);
}


static struct brcmf_msgbuf_work_item *
brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
{
	struct brcmf_msgbuf_work_item *work = NULL;
	ulong flags;

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	if (!list_empty(&msgbuf->work_queue)) {
		work = list_first_entry(&msgbuf->work_queue,
					struct brcmf_msgbuf_work_item, queue);
		list_del(&work->queue);
	}
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);

	return work;
}

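/* Worker-context part of flowring creation: allocate the ring DMA
 * memory, configure the commonring and send a FLOW_RING_CREATE request
 * to the firmware.
 */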
static u32
brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_msgbuf_work_item *work)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_tx_flowring_create_req *create;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 flowid;
	void *dma_buf;
	u32 dma_sz;
	u64 address;
	int err;

	flowid = work->flowid;
	dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
	dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
				     &msgbuf->flowring_dma_handle[flowid],
				     GFP_KERNEL);
	if (!dma_buf) {
		bphy_err(drvr, "dma_alloc_coherent failed\n");
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	brcmf_commonring_config(msgbuf->flowrings[flowid],
				BRCMF_H2D_TXFLOWRING_MAX_ITEM,
				BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
	create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	create->msg.ifidx = work->ifidx;
	create->msg.request_id = 0;
	create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
	create->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
	memcpy(create->sa, work->sa, ETH_ALEN);
	memcpy(create->da, work->da, ETH_ALEN);
	address = (u64)msgbuf->flowring_dma_handle[flowid];
	create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
	create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
	create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);

	brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
		  flowid, work->da, create->tid, work->ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		bphy_err(drvr, "Failed to write commonring\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return BRCMF_FLOWRING_INVALID_ID;
	}

	return flowid;
}


static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *create;

	msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);

	while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
		brcmf_msgbuf_flowring_create_worker(msgbuf, create);
		kfree(create);
	}
}


static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
					struct sk_buff *skb)
{
	struct brcmf_msgbuf_work_item *create;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;
	ulong flags;

	create = kzalloc(sizeof(*create), GFP_ATOMIC);
	if (create == NULL)
		return BRCMF_FLOWRING_INVALID_ID;

	flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
				       skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		kfree(create);
		return flowid;
	}

	create->flowid = flowid;
	create->ifidx = ifidx;
	memcpy(create->sa, eh->h_source, ETH_ALEN);
	memcpy(create->da, eh->h_dest, ETH_ALEN);

	spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
	list_add_tail(&create->queue, &msgbuf->work_queue);
	spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
	schedule_work(&msgbuf->flowring_work);

	return flowid;
}

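/* Drain queued TX packets for one flowring into its commonring. The
 * write pointer is pushed to the device after the first
 * BRCMF_MSGBUF_TX_FLUSH_CNT1 packets and then every
 * BRCMF_MSGBUF_TX_FLUSH_CNT2 packets thereafter.
 */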
static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
{
	struct brcmf_flowring *flow = msgbuf->flow;
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	u32 count;
	struct sk_buff *skb;
	dma_addr_t physaddr;
	u32 pktid;
	struct msgbuf_tx_msghdr *tx_msghdr;
	u64 address;

	commonring = msgbuf->flowrings[flowid];
	if (!brcmf_commonring_write_available(commonring))
		return;

	brcmf_commonring_lock(commonring);

	count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
	while (brcmf_flowring_qlen(flow, flowid)) {
		skb = brcmf_flowring_dequeue(flow, flowid);
		if (skb == NULL) {
			bphy_err(drvr, "No SKB, but qlen %d\n",
				 brcmf_flowring_qlen(flow, flowid));
			break;
		}
		skb_orphan(skb);
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->tx_pktids, skb, ETH_HLEN,
					     &physaddr, &pktid)) {
			brcmf_flowring_reinsert(flow, flowid, skb);
			bphy_err(drvr, "No PKTID available !!\n");
			break;
		}
		ret_ptr = brcmf_commonring_reserve_for_write(commonring);
		if (!ret_ptr) {
			brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
					       msgbuf->tx_pktids, pktid);
			brcmf_flowring_reinsert(flow, flowid, skb);
			break;
		}
		count++;

		tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;

		tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
		tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1);
		tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
		tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
		tx_msghdr->flags |= (skb->priority & 0x07) <<
				    BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
		tx_msghdr->seg_cnt = 1;
		memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
		tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
		address = (u64)physaddr;
		tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
		tx_msghdr->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);
		tx_msghdr->metadata_buf_len = 0;
		tx_msghdr->metadata_buf_addr.high_addr = 0;
		tx_msghdr->metadata_buf_addr.low_addr = 0;
		atomic_inc(&commonring->outstanding_tx);
		if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
			brcmf_commonring_write_complete(commonring);
			count = 0;
		}
	}
	if (count)
		brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
}


static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
{
	struct brcmf_msgbuf *msgbuf;
	u32 flowid;

	msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
	for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) {
		clear_bit(flowid, msgbuf->flow_map);
		brcmf_msgbuf_txflow(msgbuf, flowid);
	}
}


static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
					bool force)
{
	struct brcmf_commonring *commonring;

	set_bit(flowid, msgbuf->flow_map);
	commonring = msgbuf->flowrings[flowid];
	if ((force) || (atomic_read(&commonring->outstanding_tx) <
			BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
		queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);

	return 0;
}


static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
				      struct sk_buff *skb)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_flowring *flow = msgbuf->flow;
	struct ethhdr *eh = (struct ethhdr *)(skb->data);
	u32 flowid;
	u32 queue_count;
	bool force;

	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
		flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
		if (flowid == BRCMF_FLOWRING_INVALID_ID) {
			return -ENOMEM;
		} else {
			brcmf_flowring_enqueue(flow, flowid, skb);
			return 0;
		}
	}
	queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
	force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);

	return 0;
}


static void
brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
				 enum proto_addr_mode addr_mode)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
}


static void
brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
}


static void
brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;

	brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
}

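/* Handle MSGBUF_TYPE_IOCTL_CMPLT: record status, response length and
 * pktid for the waiting caller, wake it up and replenish the ioctl
 * response buffer pool.
 */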
static void
brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct msgbuf_ioctl_resp_hdr *ioctl_resp;

	ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;

	msgbuf->ioctl_resp_status =
		(s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
	msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
	msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);

	brcmf_msgbuf_ioctl_resp_wake(msgbuf);

	if (msgbuf->cur_ioctlrespbuf)
		msgbuf->cur_ioctlrespbuf--;
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
}


static void
brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_commonring *commonring;
	struct msgbuf_tx_status *tx_status;
	u32 idx;
	struct sk_buff *skb;
	u16 flowid;

	tx_status = (struct msgbuf_tx_status *)buf;
	idx = le32_to_cpu(tx_status->msg.request_id) - 1;
	flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->tx_pktids, idx);
	if (!skb)
		return;

	set_bit(flowid, msgbuf->txstatus_done_map);
	commonring = msgbuf->flowrings[flowid];
	atomic_dec(&commonring->outstanding_tx);

	brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
			 skb, true);
}


static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_bufpost *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);

		if (skb == NULL) {
			bphy_err(drvr, "Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			bphy_err(drvr, "No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		if (msgbuf->rx_metadata_offset) {
			address = (u64)physaddr;
			rx_bufpost->metadata_buf_len =
				cpu_to_le16(msgbuf->rx_metadata_offset);
			rx_bufpost->metadata_buf_addr.high_addr =
				cpu_to_le32(address >> 32);
			rx_bufpost->metadata_buf_addr.low_addr =
				cpu_to_le32(address & 0xffffffff);

			skb_pull(skb, msgbuf->rx_metadata_offset);
			pktlen = skb->len;
			physaddr += msgbuf->rx_metadata_offset;
		}
		rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->data_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->data_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	return i;
}

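/* Keep the RX post ring topped up to max_rxbufpost buffers. */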
static void
brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
{
	u32 fillbufs;
	u32 retcount;

	fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;

	while (fillbufs) {
		retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
		if (!retcount)
			break;
		msgbuf->rxbufpost += retcount;
		fillbufs -= retcount;
	}
}


static void
brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
{
	msgbuf->rxbufpost -= rxcnt;
	if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
				  BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
}


static u32
brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
			     u32 count)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct brcmf_commonring *commonring;
	void *ret_ptr;
	struct sk_buff *skb;
	u16 alloced;
	u32 pktlen;
	dma_addr_t physaddr;
	struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
	u64 address;
	u32 pktid;
	u32 i;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
							      count,
							      &alloced);
	if (!ret_ptr) {
		bphy_err(drvr, "Failed to reserve space in commonring\n");
		brcmf_commonring_unlock(commonring);
		return 0;
	}

	for (i = 0; i < alloced; i++) {
		rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
		memset(rx_bufpost, 0, sizeof(*rx_bufpost));

		skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_CTL_PKT_SIZE);

		if (skb == NULL) {
			bphy_err(drvr, "Failed to alloc SKB\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}

		pktlen = skb->len;
		if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
					     msgbuf->rx_pktids, skb, 0,
					     &physaddr, &pktid)) {
			dev_kfree_skb_any(skb);
			bphy_err(drvr, "No PKTID available !!\n");
			brcmf_commonring_write_cancel(commonring, alloced - i);
			break;
		}
		if (event_buf)
			rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
		else
			rx_bufpost->msg.msgtype =
				MSGBUF_TYPE_IOCTLRESP_BUF_POST;
		rx_bufpost->msg.request_id = cpu_to_le32(pktid);

		address = (u64)physaddr;
		rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
		rx_bufpost->host_buf_addr.high_addr =
			cpu_to_le32(address >> 32);
		rx_bufpost->host_buf_addr.low_addr =
			cpu_to_le32(address & 0xffffffff);

		ret_ptr += brcmf_commonring_len_item(commonring);
	}

	if (i)
		brcmf_commonring_write_complete(commonring);

	brcmf_commonring_unlock(commonring);

	return i;
}


static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
	msgbuf->cur_ioctlrespbuf += count;
}


static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
{
	u32 count;

	count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
	count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
	msgbuf->cur_eventbuf += count;
}

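/* Handle MSGBUF_TYPE_WL_EVENT: reclaim the posted event buffer, repost
 * a replacement and hand the event to the firmware event handler.
 */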
static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_rx_event *event;
	u32 idx;
	u16 buflen;
	struct sk_buff *skb;
	struct brcmf_if *ifp;

	event = (struct msgbuf_rx_event *)buf;
	idx = le32_to_cpu(event->msg.request_id);
	buflen = le16_to_cpu(event->event_data_len);

	if (msgbuf->cur_eventbuf)
		msgbuf->cur_eventbuf--;
	brcmf_msgbuf_rxbuf_event_post(msgbuf);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
	if (!ifp || !ifp->ndev) {
		bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
			 event->msg.ifidx);
		goto exit;
	}

	skb->protocol = eth_type_trans(skb, ifp->ndev);

	brcmf_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL);

exit:
	brcmu_pkt_buf_free_skb(skb);
}


static void
brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_rx_complete *rx_complete;
	struct sk_buff *skb;
	u16 data_offset;
	u16 buflen;
	u16 flags;
	u32 idx;
	struct brcmf_if *ifp;

	brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);

	rx_complete = (struct msgbuf_rx_complete *)buf;
	data_offset = le16_to_cpu(rx_complete->data_offset);
	buflen = le16_to_cpu(rx_complete->data_len);
	idx = le32_to_cpu(rx_complete->msg.request_id);
	flags = le16_to_cpu(rx_complete->flags);

	skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
				     msgbuf->rx_pktids, idx);
	if (!skb)
		return;

	if (data_offset)
		skb_pull(skb, data_offset);
	else if (msgbuf->rx_dataoffset)
		skb_pull(skb, msgbuf->rx_dataoffset);

	skb_trim(skb, buflen);

	if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
	    BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
		ifp = msgbuf->drvr->mon_if;

		if (!ifp) {
			bphy_err(drvr, "Received unexpected monitor pkt\n");
			brcmu_pkt_buf_free_skb(skb);
			return;
		}

		brcmf_netif_mon_rx(ifp, skb);
		return;
	}

	ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
	if (!ifp || !ifp->ndev) {
		bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
			 rx_complete->msg.ifidx);
		brcmu_pkt_buf_free_skb(skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, ifp->ndev);
	brcmf_netif_rx(ifp, skb);
}

static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
					    void *buf)
{
	struct msgbuf_gen_status *gen_status = buf;
	struct brcmf_pub *drvr = msgbuf->drvr;
	int err;

	err = le16_to_cpu(gen_status->compl_hdr.status);
	if (err)
		bphy_err(drvr, "Firmware reported general error: %d\n", err);
}

static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf,
					     void *buf)
{
	struct msgbuf_ring_status *ring_status = buf;
	struct brcmf_pub *drvr = msgbuf->drvr;
	int err;

	err = le16_to_cpu(ring_status->compl_hdr.status);
	if (err) {
		int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id);

		bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring,
			 err);
	}
}

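/* Firmware response to FLOW_RING_CREATE: on success open the flowring
 * and schedule any queued traffic, on failure tear the ring down again.
 */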
static void
brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_flowring_create_resp *flowring_create_resp;
	u16 status;
	u16 flowid;

	flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;

	flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	status = le16_to_cpu(flowring_create_resp->compl_hdr.status);

	if (status) {
		bphy_err(drvr, "Flowring creation failed, code %d\n", status);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
		  status);

	brcmf_flowring_open(msgbuf->flow, flowid);

	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
}


static void
brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
					       void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_flowring_delete_resp *flowring_delete_resp;
	u16 status;
	u16 flowid;

	flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;

	flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
	flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
	status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);

	if (status) {
		bphy_err(drvr, "Flowring deletion failed, code %d\n", status);
		brcmf_flowring_delete(msgbuf->flow, flowid);
		return;
	}
	brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
		  status);

	brcmf_msgbuf_remove_flowring(msgbuf, flowid);
}


static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
{
	struct brcmf_pub *drvr = msgbuf->drvr;
	struct msgbuf_common_hdr *msg;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype) {
	case MSGBUF_TYPE_GEN_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n");
		brcmf_msgbuf_process_gen_status(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RING_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n");
		brcmf_msgbuf_process_ring_status(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
		brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
		brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
		brcmf_msgbuf_process_event(msgbuf, buf);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
		brcmf_msgbuf_process_txstatus(msgbuf, buf);
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
		brcmf_msgbuf_process_rx_complete(msgbuf, buf);
		break;
	default:
		bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype);
		break;
	}
}

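/* Walk all new items on a completion ring, releasing the read pointer
 * back to the device every BRCMF_MSGBUF_UPDATE_RX_PTR_THRS items so
 * slots can be reused while processing continues.
 */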
static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
				    struct brcmf_commonring *commonring)
{
	void *buf;
	u16 count;
	u16 processed;

again:
	buf = brcmf_commonring_get_read_ptr(commonring, &count);
	if (buf == NULL)
		return;

	processed = 0;
	while (count) {
		brcmf_msgbuf_process_msgtype(msgbuf,
					     buf + msgbuf->rx_dataoffset);
		buf += brcmf_commonring_len_item(commonring);
		processed++;
		if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
			brcmf_commonring_read_complete(commonring, processed);
			processed = 0;
		}
		count--;
	}
	if (processed)
		brcmf_commonring_read_complete(commonring, processed);

	if (commonring->r_ptr == 0)
		goto again;
}


int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	void *buf;
	u32 flowid;
	int qlen;

	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);
	buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	brcmf_msgbuf_process_rx(msgbuf, buf);

	for_each_set_bit(flowid, msgbuf->txstatus_done_map,
			 msgbuf->max_flowrings) {
		clear_bit(flowid, msgbuf->txstatus_done_map);
		commonring = msgbuf->flowrings[flowid];
		qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
		if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
		    ((qlen) && (atomic_read(&commonring->outstanding_tx) <
				BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
			brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
	}

	return 0;
}

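/* Tear down a flowring: mark it closing, wait (bounded) for outstanding
 * TX status, then request deletion from the firmware. The local ring
 * resources are freed when the delete response arrives.
 */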
void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
{
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct msgbuf_tx_flowring_delete_req *delete;
	struct brcmf_commonring *commonring;
	struct brcmf_commonring *commonring_del = msgbuf->flowrings[flowid];
	struct brcmf_flowring *flow = msgbuf->flow;
	void *ret_ptr;
	u8 ifidx;
	int err;
	int retry = BRCMF_MAX_TXSTATUS_WAIT_RETRIES;

	/* make sure it is not in txflow */
	brcmf_commonring_lock(commonring_del);
	flow->rings[flowid]->status = RING_CLOSING;
	brcmf_commonring_unlock(commonring_del);

	/* wait for commonring txflow finished */
	while (retry && atomic_read(&commonring_del->outstanding_tx)) {
		usleep_range(5000, 10000);
		retry--;
	}
	if (!retry) {
		brcmf_err("timed out waiting for txstatus\n");
		atomic_set(&commonring_del->outstanding_tx, 0);
	}

	/* no need to submit if firmware can not be reached */
	if (drvr->bus_if->state != BRCMF_BUS_UP) {
		brcmf_dbg(MSGBUF, "bus down, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	brcmf_commonring_lock(commonring);
	ret_ptr = brcmf_commonring_reserve_for_write(commonring);
	if (!ret_ptr) {
		bphy_err(drvr, "FW unaware, flowring will be removed !!\n");
		brcmf_commonring_unlock(commonring);
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
		return;
	}

	delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;

	ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);

	delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	delete->msg.ifidx = ifidx;
	delete->msg.request_id = 0;

	delete->flow_ring_id = cpu_to_le16(flowid +
					   BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
	delete->reason = 0;

	brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
		  flowid, ifidx);

	err = brcmf_commonring_write_complete(commonring);
	brcmf_commonring_unlock(commonring);
	if (err) {
		bphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n");
		brcmf_msgbuf_remove_flowring(msgbuf, flowid);
	}
}

#ifdef DEBUG
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
	struct brcmf_commonring *commonring;
	u16 i;
	struct brcmf_flowring_ring *ring;
	struct brcmf_flowring_hash *hash;

	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
	seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
	seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
	seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
	seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
	seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u\n",
		   commonring->r_ptr, commonring->w_ptr, commonring->depth);

	seq_printf(seq, "\nh2d_flowrings: depth %u\n",
		   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
	seq_puts(seq, "Active flowrings:\n");
	for (i = 0; i < msgbuf->flow->nrofrings; i++) {
		if (!msgbuf->flow->rings[i])
			continue;
		ring = msgbuf->flow->rings[i];
		if (ring->status != RING_OPEN)
			continue;
		commonring = msgbuf->flowrings[i];
		hash = &msgbuf->flow->hash[ring->hash_id];
		seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
				" ifidx %u, fifo %u, da %pM\n",
			   i, commonring->r_ptr, commonring->w_ptr,
			   skb_queue_len(&ring->skblist), ring->blocked,
			   hash->ifidx, hash->fifo, hash->mac);
	}

	return 0;
}
#else
static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
{
	return 0;
}
#endif

static void brcmf_msgbuf_debugfs_create(struct brcmf_pub *drvr)
{
	brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
}

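/* Attach the msgbuf protocol layer: allocate pktid arrays, the shared
 * ioctl buffer and per-flowring DMA handles, hook up the brcmf_proto
 * callbacks and pre-post RX data, event and ioctl response buffers.
 */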
int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
{
	struct brcmf_bus_msgbuf *if_msgbuf;
	struct brcmf_msgbuf *msgbuf;
	u64 address;
	u32 count;

	if_msgbuf = drvr->bus_if->msgbuf;

	if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
		bphy_err(drvr, "driver not configured for this many flowrings %d\n",
			 if_msgbuf->max_flowrings);
		if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
	}

	msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
	if (!msgbuf)
		goto fail;

	msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
	if (msgbuf->txflow_wq == NULL) {
		bphy_err(drvr, "workqueue creation failed\n");
		goto fail;
	}
	INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
	count = BITS_TO_LONGS(if_msgbuf->max_flowrings);
	count = count * sizeof(unsigned long);
	msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->flow_map)
		goto fail;

	msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
	if (!msgbuf->txstatus_done_map)
		goto fail;

	msgbuf->drvr = drvr;
	msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
					     BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					     &msgbuf->ioctbuf_handle,
					     GFP_KERNEL);
	if (!msgbuf->ioctbuf)
		goto fail;
	address = (u64)msgbuf->ioctbuf_handle;
	msgbuf->ioctbuf_phys_hi = address >> 32;
	msgbuf->ioctbuf_phys_lo = address & 0xffffffff;

	drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
	drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
	drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
	drvr->proto->tx_queue_data = brcmf_msgbuf_tx_queue_data;
	drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
	drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
	drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
	drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
	drvr->proto->debugfs_create = brcmf_msgbuf_debugfs_create;
	drvr->proto->pd = msgbuf;

	init_waitqueue_head(&msgbuf->ioctl_resp_wait);

	msgbuf->commonrings =
		(struct brcmf_commonring **)if_msgbuf->commonrings;
	msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
	msgbuf->max_flowrings = if_msgbuf->max_flowrings;
	msgbuf->flowring_dma_handle =
		kcalloc(msgbuf->max_flowrings,
			sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
	if (!msgbuf->flowring_dma_handle)
		goto fail;

	msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
	msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;

	msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
	msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;

	msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
						     DMA_TO_DEVICE);
	if (!msgbuf->tx_pktids)
		goto fail;
	msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
						     DMA_FROM_DEVICE);
	if (!msgbuf->rx_pktids)
		goto fail;

	msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
					     if_msgbuf->max_flowrings);
	if (!msgbuf->flow)
		goto fail;


	brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
		  msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
		  msgbuf->max_ioctlrespbuf);
	count = 0;
	do {
		brcmf_msgbuf_rxbuf_data_fill(msgbuf);
		if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
			msleep(10);
		else
			break;
		count++;
	} while (count < 10);
	brcmf_msgbuf_rxbuf_event_post(msgbuf);
	brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);

	INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
	spin_lock_init(&msgbuf->flowring_work_lock);
	INIT_LIST_HEAD(&msgbuf->work_queue);

	return 0;

fail:
	if (msgbuf) {
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		if (msgbuf->ioctbuf)
			dma_free_coherent(drvr->bus_if->dev,
					  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
					  msgbuf->ioctbuf,
					  msgbuf->ioctbuf_handle);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);
		kfree(msgbuf);
	}
	return -ENOMEM;
}

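/* Detach: flush pending flowring work and free rings, buffers and
 * pktid arrays.
 */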
void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
{
	struct brcmf_msgbuf *msgbuf;
	struct brcmf_msgbuf_work_item *work;

	brcmf_dbg(TRACE, "Enter\n");
	if (drvr->proto->pd) {
		msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
		cancel_work_sync(&msgbuf->flowring_work);
		while (!list_empty(&msgbuf->work_queue)) {
			work = list_first_entry(&msgbuf->work_queue,
						struct brcmf_msgbuf_work_item,
						queue);
			list_del(&work->queue);
			kfree(work);
		}
		kfree(msgbuf->flow_map);
		kfree(msgbuf->txstatus_done_map);
		if (msgbuf->txflow_wq)
			destroy_workqueue(msgbuf->txflow_wq);

		brcmf_flowring_detach(msgbuf->flow);
		dma_free_coherent(drvr->bus_if->dev,
				  BRCMF_TX_IOCTL_MAX_MSG_SIZE,
				  msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
		brcmf_msgbuf_release_pktids(msgbuf);
		kfree(msgbuf->flowring_dma_handle);
		kfree(msgbuf);
		drvr->proto->pd = NULL;
	}
}