1 // SPDX-License-Identifier: ISC 2 /* 3 * Copyright (c) 2005-2011 Atheros Communications Inc. 4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. 5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 6 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 7 */ 8 #include "core.h" 9 #include "debug.h" 10 #include "mac.h" 11 #include "hw.h" 12 #include "wmi.h" 13 #include "wmi-ops.h" 14 #include "wmi-tlv.h" 15 #include "p2p.h" 16 #include "testmode.h" 17 #include "txrx.h" 18 #include <linux/bitfield.h> 19 20 /***************/ 21 /* TLV helpers */ 22 /**************/ 23 24 struct wmi_tlv_policy { 25 size_t min_len; 26 }; 27 28 static const struct wmi_tlv_policy wmi_tlv_policies[] = { 29 [WMI_TLV_TAG_ARRAY_BYTE] 30 = { .min_len = 0 }, 31 [WMI_TLV_TAG_ARRAY_UINT32] 32 = { .min_len = 0 }, 33 [WMI_TLV_TAG_STRUCT_SCAN_EVENT] 34 = { .min_len = sizeof(struct wmi_scan_event) }, 35 [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR] 36 = { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) }, 37 [WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT] 38 = { .min_len = sizeof(struct wmi_chan_info_event) }, 39 [WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT] 40 = { .min_len = sizeof(struct wmi_vdev_start_response_event) }, 41 [WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT] 42 = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) }, 43 [WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT] 44 = { .min_len = sizeof(struct wmi_host_swba_event) }, 45 [WMI_TLV_TAG_STRUCT_TIM_INFO] 46 = { .min_len = sizeof(struct wmi_tim_info) }, 47 [WMI_TLV_TAG_STRUCT_P2P_NOA_INFO] 48 = { .min_len = sizeof(struct wmi_p2p_noa_info) }, 49 [WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT] 50 = { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) }, 51 [WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES] 52 = { .min_len = sizeof(struct hal_reg_capabilities) }, 53 [WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ] 54 = { .min_len = sizeof(struct wlan_host_mem_req) }, 55 [WMI_TLV_TAG_STRUCT_READY_EVENT] 56 = { .min_len = sizeof(struct wmi_tlv_rdy_ev) }, 57 
[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT] 58 = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) }, 59 [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT] 60 = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) }, 61 [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT] 62 = { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) }, 63 [WMI_TLV_TAG_STRUCT_ROAM_EVENT] 64 = { .min_len = sizeof(struct wmi_tlv_roam_ev) }, 65 [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO] 66 = { .min_len = sizeof(struct wmi_tlv_wow_event_info) }, 67 [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT] 68 = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) }, 69 }; 70 71 static int 72 ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len, 73 int (*iter)(struct ath10k *ar, u16 tag, u16 len, 74 const void *ptr, void *data), 75 void *data) 76 { 77 const void *begin = ptr; 78 const struct wmi_tlv *tlv; 79 u16 tlv_tag, tlv_len; 80 int ret; 81 82 while (len > 0) { 83 if (len < sizeof(*tlv)) { 84 ath10k_dbg(ar, ATH10K_DBG_WMI, 85 "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n", 86 ptr - begin, len, sizeof(*tlv)); 87 return -EINVAL; 88 } 89 90 tlv = ptr; 91 tlv_tag = __le16_to_cpu(tlv->tag); 92 tlv_len = __le16_to_cpu(tlv->len); 93 ptr += sizeof(*tlv); 94 len -= sizeof(*tlv); 95 96 if (tlv_len > len) { 97 ath10k_dbg(ar, ATH10K_DBG_WMI, 98 "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n", 99 tlv_tag, ptr - begin, len, tlv_len); 100 return -EINVAL; 101 } 102 103 if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) && 104 wmi_tlv_policies[tlv_tag].min_len && 105 wmi_tlv_policies[tlv_tag].min_len > tlv_len) { 106 ath10k_dbg(ar, ATH10K_DBG_WMI, 107 "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n", 108 tlv_tag, ptr - begin, tlv_len, 109 wmi_tlv_policies[tlv_tag].min_len); 110 return -EINVAL; 111 } 112 113 ret = iter(ar, tlv_tag, tlv_len, ptr, data); 114 if (ret) 115 return ret; 116 117 ptr += tlv_len; 118 len -= tlv_len; 119 } 120 121 return 0; 122 } 123 124 static 
int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len, 125 const void *ptr, void *data) 126 { 127 const void **tb = data; 128 129 if (tag < WMI_TLV_TAG_MAX) 130 tb[tag] = ptr; 131 132 return 0; 133 } 134 135 static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb, 136 const void *ptr, size_t len) 137 { 138 return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse, 139 (void *)tb); 140 } 141 142 static const void ** 143 ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr, 144 size_t len, gfp_t gfp) 145 { 146 const void **tb; 147 int ret; 148 149 tb = kzalloc_objs(*tb, WMI_TLV_TAG_MAX, gfp); 150 if (!tb) 151 return ERR_PTR(-ENOMEM); 152 153 ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len); 154 if (ret) { 155 kfree(tb); 156 return ERR_PTR(ret); 157 } 158 159 return tb; 160 } 161 162 static u16 ath10k_wmi_tlv_len(const void *ptr) 163 { 164 return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len); 165 } 166 167 /**************/ 168 /* TLV events */ 169 /**************/ 170 static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar, 171 struct sk_buff *skb) 172 { 173 const void **tb; 174 const struct wmi_tlv_bcn_tx_status_ev *ev; 175 struct ath10k_vif *arvif; 176 u32 vdev_id, tx_status; 177 int ret; 178 179 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 180 if (IS_ERR(tb)) { 181 ret = PTR_ERR(tb); 182 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 183 return ret; 184 } 185 186 ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]; 187 if (!ev) { 188 kfree(tb); 189 return -EPROTO; 190 } 191 192 tx_status = __le32_to_cpu(ev->tx_status); 193 vdev_id = __le32_to_cpu(ev->vdev_id); 194 195 switch (tx_status) { 196 case WMI_TLV_BCN_TX_STATUS_OK: 197 break; 198 case WMI_TLV_BCN_TX_STATUS_XRETRY: 199 case WMI_TLV_BCN_TX_STATUS_DROP: 200 case WMI_TLV_BCN_TX_STATUS_FILTERED: 201 /* FIXME: It's probably worth telling mac80211 to stop the 202 * interface as it is crippled. 
	 */
		ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
			    vdev_id, tx_status);
		break;
	}

	/* If the vdev is up and a channel switch is active, schedule the
	 * CSA worker for this vif.
	 */
	arvif = ath10k_get_arvif(ar, vdev_id);
	if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active)
		ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);

	kfree(tb);
	return 0;
}

/* Firmware acknowledged a vdev delete request; wake up the waiter
 * blocked on ar->vdev_delete_done.
 */
static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
						  struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n");
	complete(&ar->vdev_delete_done);
}

/* Per-TLV callback: copy one wmi_tlv_peer_stats_info entry into the
 * matching station's ath10k_sta rate fields. @data carries the vdev id
 * the stats belong to.
 */
static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
						const void *ptr, void *data)
{
	const struct wmi_tlv_peer_stats_info *stat = ptr;
	u32 vdev_id = *(u32 *)data;
	struct ath10k_sta *arsta;
	struct ath10k_peer *peer;

	if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
		return -EPROTO;

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
		   stat->peer_macaddr.addr,
		   __le32_to_cpu(stat->last_rx_rate_code),
		   __le32_to_cpu(stat->last_rx_bitrate_kbps));

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
		   __le32_to_cpu(stat->last_tx_rate_code),
		   __le32_to_cpu(stat->last_tx_bitrate_kbps));

	/* hold data_lock across the peer lookup and the arsta update;
	 * released automatically on every return path by the guard
	 */
	guard(spinlock_bh)(&ar->data_lock);

	peer = ath10k_peer_find(ar, vdev_id, stat->peer_macaddr.addr);
	if (!peer || !peer->sta) {
		ath10k_warn(ar, "not found %s with vdev id %u mac addr %pM for peer stats\n",
			    peer ? "sta" : "peer", vdev_id, stat->peer_macaddr.addr);
		return -EINVAL;
	}

	arsta = (struct ath10k_sta *)peer->sta->drv_priv;
	arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
	arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
	arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
	arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);

	return 0;
}

/* Parse a PEER_STATS_INFO event: pull the event header and then iterate
 * the embedded array of per-peer stat structures. Always returns 0 to the
 * dispatcher; parse problems are only logged.
 */
static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
						  struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_peer_stats_info_ev *ev;
	const void *data;
	u32 num_peer_stats;
	u32 vdev_id;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_STRUCT];

	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	num_peer_stats = __le32_to_cpu(ev->num_peers);
	vdev_id = __le32_to_cpu(ev->vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
		   vdev_id,
		   num_peer_stats,
		   __le32_to_cpu(ev->more_data));

	ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
				  ath10k_wmi_tlv_parse_peer_stats_info, &vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);

	kfree(tb);
	return 0;
}

/* Handle the PEER_STATS_INFO event and wake anyone waiting on the
 * completion regardless of parse success.
 */
static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
						 struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
	ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
	complete(&ar->peer_stats_info_complete);
}

static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
					  struct sk_buff *skb)
{
	const void **tb;
	const struct
wmi_tlv_diag_data_ev *ev;
	const struct wmi_tlv_diag_item *item;
	const void *data;
	int ret, num_items, len;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	num_items = __le32_to_cpu(ev->num_items);
	len = ath10k_wmi_tlv_len(data);

	/* Walk the container: each item is a fixed header followed by a
	 * payload that is padded up to a 4-byte multiple. Bail out if an
	 * item header or its declared payload would overrun the buffer.
	 */
	while (num_items--) {
		if (len == 0)
			break;
		if (len < sizeof(*item)) {
			ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
			break;
		}

		item = data;

		if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
			ath10k_warn(ar, "failed to parse diag data: item is too long\n");
			break;
		}

		trace_ath10k_wmi_diag_container(ar,
						item->type,
						__le32_to_cpu(item->timestamp),
						__le32_to_cpu(item->code),
						__le16_to_cpu(item->len),
						item->payload);

		/* advance past header + padded payload */
		len -= sizeof(*item);
		len -= roundup(__le16_to_cpu(item->len), 4);

		data += sizeof(*item);
		data += roundup(__le16_to_cpu(item->len), 4);
	}

	/* a clean walk ends with num_items == -1 and len == 0 */
	if (num_items != -1 || len != 0)
		ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
			    num_items, len);

	kfree(tb);
	return 0;
}

/* Forward the raw diag byte array from firmware to tracing. */
static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
				     struct sk_buff *skb)
{
	const void **tb;
	const void *data;
	int ret, len;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	data = tb[WMI_TLV_TAG_ARRAY_BYTE];
	if (!data) {
		kfree(tb);
		return -EPROTO;
	}
	len = ath10k_wmi_tlv_len(data);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
	trace_ath10k_wmi_diag(ar, data, len);

	kfree(tb);
	return 0;
}

/* P2P notice-of-absence update: pull the event and NoA descriptor info
 * and hand them to the p2p layer for the given vdev.
 */
static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
					struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_p2p_noa_ev *ev;
	const struct wmi_p2p_noa_info *noa;
	int ret, vdev_id;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
	noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];

	if (!ev || !noa) {
		kfree(tb);
		return -EPROTO;
	}

	vdev_id = __le32_to_cpu(ev->vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv p2p noa vdev_id %i descriptors %u\n",
		   vdev_id, noa->num_descriptors);

	ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
	kfree(tb);
	return 0;
}

/* Firmware tx-pause notification. For vdev-scoped pause ids, apply the
 * pause/unpause action to every vdev set in the bitmap; peer/tid-scoped
 * ids are not supported and only logged.
 */
static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
					 struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_tlv_tx_pause_ev *ev;
	int ret, vdev_id;
	u32 pause_id, action, vdev_map, peer_id, tid_map;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	pause_id = __le32_to_cpu(ev->pause_id);
	action = __le32_to_cpu(ev->action);
	vdev_map = __le32_to_cpu(ev->vdev_map);
	peer_id = __le32_to_cpu(ev->peer_id);
	tid_map = __le32_to_cpu(ev->tid_map);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
		   pause_id, action, vdev_map, peer_id, tid_map);

	switch (pause_id) {
	case WMI_TLV_TX_PAUSE_ID_MCC:
	case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
	case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
	case WMI_TLV_TX_PAUSE_ID_AP_PS:
	case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
		/* consume the bitmap one set bit at a time */
		for (vdev_id = 0; vdev_map; vdev_id++) {
			if (!(vdev_map & BIT(vdev_id)))
				continue;

			vdev_map &= ~BIT(vdev_id);
			ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
							action);
		}
		break;
	case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
	case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
	case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
	case WMI_TLV_TX_PAUSE_ID_HOST:
		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac ignoring unsupported tx pause id %d\n",
			   pause_id);
		break;
	default:
		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac ignoring unknown tx pause vdev %d\n",
			   pause_id);
		break;
	}

	kfree(tb);
	return 0;
}

/* Hardware rfkill state changed: record it under data_lock, then notify
 * the mac layer and cfg80211.
 */
static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
						     struct sk_buff *skb)
{
	const struct wmi_tlv_rfkill_state_change_ev *ev;
	const void **tb;
	bool radio;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar,
			    "failed to parse rfkill state change event: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
	if (!ev) {
		kfree(tb);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
		   __le32_to_cpu(ev->gpio_pin_num),
		   __le32_to_cpu(ev->int_type),
		   __le32_to_cpu(ev->radio_state));

	radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);

	spin_lock_bh(&ar->data_lock);

	if (!radio)
		ar->hw_rfkill_on = true;

	spin_unlock_bh(&ar->data_lock);

	/* notify cfg80211 radio state change */
	ath10k_mac_rfkill_enable_radio(ar, radio);
	wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
}

static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
					    struct sk_buff *skb)
{
	const
struct wmi_tlv_pdev_temperature_event *ev;

	/* NOTE: this event is read directly from skb->data, not via the
	 * TLV table like the other handlers.
	 */
	ev = (struct wmi_tlv_pdev_temperature_event *)skb->data;
	if (WARN_ON(skb->len < sizeof(*ev)))
		return -EPROTO;

	ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
	return 0;
}

/* TDLS peer event: on a teardown-class reason, look up the station and
 * vif under RCU and ask mac80211 to tear the TDLS link down. Other
 * reasons are ignored.
 */
static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_sta *station;
	const struct wmi_tlv_tdls_peer_event *ev;
	const void **tb;
	struct ath10k_vif *arvif;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ath10k_warn(ar, "tdls peer failed to parse tlv");
		return;
	}
	ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT];
	if (!ev) {
		kfree(tb);
		ath10k_warn(ar, "tdls peer NULL event");
		return;
	}

	switch (__le32_to_cpu(ev->peer_reason)) {
	case WMI_TDLS_TEARDOWN_REASON_TX:
	case WMI_TDLS_TEARDOWN_REASON_RSSI:
	case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
		/* RCU protects the station lookup until we are done with
		 * it; released at the exit label
		 */
		rcu_read_lock();
		station = ieee80211_find_sta_by_ifaddr(ar->hw,
						       ev->peer_macaddr.addr,
						       NULL);
		if (!station) {
			ath10k_warn(ar, "did not find station from tdls peer event");
			goto exit;
		}

		arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
		if (!arvif) {
			ath10k_warn(ar, "no vif for vdev_id %d found",
				    __le32_to_cpu(ev->vdev_id));
			goto exit;
		}

		ieee80211_tdls_oper_request(
					arvif->vif, station->addr,
					NL80211_TDLS_TEARDOWN,
					WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
					GFP_ATOMIC
					);
		break;
	default:
		kfree(tb);
		return;
	}

exit:
	rcu_read_unlock();
	kfree(tb);
}

/* Firmware acknowledged a peer delete request; log the response and wake
 * the waiter on ar->peer_delete_done.
 */
static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
						 struct sk_buff *skb)
{
	struct wmi_peer_delete_resp_ev_arg *arg;
	struct wmi_tlv *tlv_hdr;

	tlv_hdr = (struct wmi_tlv *)skb->data;
	arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d", arg->vdev_id);
	ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM", &arg->peer_addr);
	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n");

	complete(&ar->peer_delete_done);

	return 0;
}

/***********/
/* TLV ops */
/***********/

/* Main TLV WMI receive path: strip the command header, let testmode
 * intercept if it wants the event, then dispatch on the event id. The
 * skb is freed here except for handlers that take ownership (mgmt rx,
 * service ready), which return early without the free.
 */
static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;
	bool consumed;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	consumed = ath10k_tm_event_wmi(ar, id, skb);

	/* Ready event must be handled normally also in UTF mode so that we
	 * know the UTF firmware has booted, others we are just bypass WMI
	 * events to testmode.
	 */
	if (consumed && id != WMI_TLV_READY_EVENTID) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi tlv testmode consumed 0x%x\n", id);
		goto out;
	}

	switch (id) {
	case WMI_TLV_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_TLV_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_TLV_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_TLV_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_TLV_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_TLV_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_TLV_PEER_STATS_INFO_EVENTID:
		ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
		break;
	case WMI_TLV_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_TLV_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_TLV_VDEV_DELETE_RESP_EVENTID:
		ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb);
		break;
	case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_TLV_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_TLV_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_TLV_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_TLV_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_TLV_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_TLV_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_TLV_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_TLV_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_TLV_SERVICE_READY_EVENTID:
		/* service_ready takes over the skb — do not free it here */
		ath10k_wmi_event_service_ready(ar, skb);
		return;
	case WMI_TLV_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
		ath10k_wmi_event_service_available(ar, skb);
		break;
	case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
		break;
	case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
		ath10k_wmi_tlv_event_diag_data(ar, skb);
		break;
	case WMI_TLV_DIAG_EVENTID:
		ath10k_wmi_tlv_event_diag(ar, skb);
		break;
	case WMI_TLV_P2P_NOA_EVENTID:
		ath10k_wmi_tlv_event_p2p_noa(ar, skb);
		break;
	case WMI_TLV_TX_PAUSE_EVENTID:
		ath10k_wmi_tlv_event_tx_pause(ar, skb);
		break;
	case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
		ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
		break;
	case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
		ath10k_wmi_tlv_event_temperature(ar, skb);
		break;
	case WMI_TLV_TDLS_PEER_EVENTID:
		ath10k_wmi_event_tdls_peer(ar, skb);
		break;
	case WMI_TLV_PEER_DELETE_RESP_EVENTID:
		ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
		break;
	case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
		ath10k_wmi_event_mgmt_tx_compl(ar, skb);
		break;
	case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID:
		ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb);
		break;
	default:
		ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}

/* Pull a scan event into @arg. Fields are copied still little-endian;
 * conversion is left to the caller.
 */
static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
					  struct sk_buff *skb,
					  struct wmi_scan_ev_arg *arg)
{
	const void **tb;
	const struct wmi_scan_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->event_type = ev->event_type;
	arg->reason = ev->reason;
	arg->channel_freq = ev->channel_freq;
	arg->scan_req_id = ev->scan_req_id;
	arg->scan_id = ev->scan_id;
	arg->vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}

/* Pull a single management-tx completion event into @arg. ack_rssi is
 * only valid when the firmware advertises TX_DATA_ACK_RSSI.
 */
static int
ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
					struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_mgmt_tx_compl_ev *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->desc_id = ev->desc_id;
	arg->status = ev->status;
	arg->pdev_id = ev->pdev_id;
	arg->ppdu_id = ev->ppdu_id;

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
		arg->ack_rssi = ev->ack_rssi;
	kfree(tb);
	return 0;
}

/* Scratch state for parsing a bundled mgmt-tx completion event. The
 * four UINT32 arrays arrive in a fixed order (desc ids, status, ppdu
 * ids, ack rssi); the *_done flags track which have been seen.
 */
struct wmi_tlv_tx_bundle_compl_parse {
	const __le32 *num_reports;
	const __le32 *desc_ids;
	const __le32 *status;
	const __le32 *ppdu_ids;
	const __le32 *ack_rssi;
	bool desc_ids_done;
	bool status_done;
	bool ppdu_ids_done;
	bool ack_rssi_done;
};

/* Per-TLV callback: record pointers to the bundle header and each of the
 * UINT32 arrays, in arrival order.
 */
static int
ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len,
					  const void *ptr, void *data)
{
	struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT:
		bundle_tx_compl->num_reports = ptr;
		break;
	case WMI_TLV_TAG_ARRAY_UINT32:
		if (!bundle_tx_compl->desc_ids_done) {
			bundle_tx_compl->desc_ids_done = true;
			bundle_tx_compl->desc_ids = ptr;
		} else if (!bundle_tx_compl->status_done) {
			bundle_tx_compl->status_done = true;
			bundle_tx_compl->status = ptr;
		} else if (!bundle_tx_compl->ppdu_ids_done) {
			bundle_tx_compl->ppdu_ids_done = true;
			bundle_tx_compl->ppdu_ids = ptr;
		} else if (!bundle_tx_compl->ack_rssi_done) {
			bundle_tx_compl->ack_rssi_done = true;
			bundle_tx_compl->ack_rssi = ptr;
		}
		break;
	default:
		break;
	}
	return 0;
}

/* Pull a bundled mgmt-tx completion event into @arg. desc_ids and status
 * are mandatory; ppdu_ids/ack_rssi are passed through as parsed (may be
 * NULL if absent).
 */
static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev(
				struct ath10k *ar, struct sk_buff *skb,
				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
{
	struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { };
	int ret;

	ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
				  ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse,
				  &bundle_tx_compl);
	if (ret) {
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids ||
	    !bundle_tx_compl.status)
		return -EPROTO;

	arg->num_reports = *bundle_tx_compl.num_reports;
	arg->desc_ids = bundle_tx_compl.desc_ids;
	arg->status = bundle_tx_compl.status;
	arg->ppdu_ids = bundle_tx_compl.ppdu_ids;

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
		arg->ack_rssi = bundle_tx_compl.ack_rssi;

	return 0;
}

/* Pull a management rx event: copy the header fields into @arg and then
 * reshape the skb in place so that its data points at the embedded frame
 * of msdu_len bytes (header and TLV framing are stripped).
 */
static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_mgmt_rx_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_mgmt_rx_ev *ev;
	const u8 *frame;
	u32 msdu_len;
	int ret, i;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
	frame = tb[WMI_TLV_TAG_ARRAY_BYTE];

	if (!ev || !frame) {
		kfree(tb);
		return -EPROTO;
	}

	arg->channel = ev->channel;
	arg->buf_len = ev->buf_len;
	arg->status = ev->status;
	arg->snr = ev->snr;
	arg->phy_mode = ev->phy_mode;
	arg->rate = ev->rate;

	for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
		arg->rssi[i] = ev->rssi[i];

	msdu_len = __le32_to_cpu(arg->buf_len);

	/* reject a frame whose declared length overruns the skb */
	if (skb->len < (frame - skb->data) + msdu_len) {
		kfree(tb);
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, msdu_len);

	kfree(tb);
	return 0;
}

/* Pull a channel info event into @arg; mac_clk_mhz is only present on
 * firmware with the SINGLE_CHAN_INFO_PER_CHANNEL feature.
 */
static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
					     struct sk_buff *skb,
					     struct wmi_ch_info_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_chan_info_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->err_code = ev->err_code;
	arg->freq = ev->freq;
	arg->cmd_flags = ev->cmd_flags;
	arg->noise_floor = ev->noise_floor;
	arg->rx_clear_count = ev->rx_clear_count;
	arg->cycle_count = ev->cycle_count;
	if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
		     ar->running_fw->fw_file.fw_features))
		arg->mac_clk_mhz = ev->mac_clk_mhz;

	kfree(tb);
	return 0;
}

/* Pull a vdev start response into @arg. */
static int
ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
				     struct wmi_vdev_start_ev_arg *arg)
{
	const void **tb;
	const struct wmi_vdev_start_response_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	skb_pull(skb, sizeof(*ev));
	arg->vdev_id = ev->vdev_id;
	arg->req_id = ev->req_id;
	arg->resp_type = ev->resp_type;
	arg->status = ev->status;

	kfree(tb);
	return 0;
}

/* Pull a peer kickout event; @arg->mac_addr points into the skb data,
 * so it is only valid while the skb is alive.
 */
static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
					       struct sk_buff *skb,
					       struct wmi_peer_kick_ev_arg *arg)
{
	const void **tb;
	const struct wmi_peer_sta_kickout_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->mac_addr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

/* Scratch state for parsing a host SWBA (beacon) event: the event header
 * plus per-vdev TIM and NoA arrays, counted as they are collected.
 */
struct wmi_tlv_swba_parse {
	const struct wmi_host_swba_event *ev;
	bool tim_done;
	bool noa_done;
	size_t n_tim;
	size_t n_noa;
	struct wmi_swba_ev_arg *arg;
};

/* Per-TLV callback: validate one TIM info structure and copy its fields
 * into the next free slot of the output arg.
 */
static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;
	struct wmi_tim_info_arg *tim_info_arg;
	const struct wmi_tim_info *tim_info_ev = ptr;

	if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
		return -EPROTO;

	if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
		return -ENOBUFS;

	/* tim_len must not claim more than the fixed-size bitmap holds */
	if (__le32_to_cpu(tim_info_ev->tim_len) >
	     sizeof(tim_info_ev->tim_bitmap)) {
		ath10k_warn(ar, "refusing to parse invalid swba structure\n");
		return -EPROTO;
	}

	tim_info_arg = &swba->arg->tim_info[swba->n_tim];
	tim_info_arg->tim_len = tim_info_ev->tim_len;
	tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
	tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
	tim_info_arg->tim_changed = tim_info_ev->tim_changed;
	tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;

	swba->n_tim++;

	return 0;
}

/* Per-TLV callback: stash a pointer to one P2P NoA info structure. */
static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
					 const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;

	if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
		return -EPROTO;

	if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
		return -ENOBUFS;

	swba->arg->noa_info[swba->n_noa++] = ptr;
	return 0;
}

/* Top-level SWBA TLV dispatcher: the first ARRAY_STRUCT is the TIM info
 * array, the second is the NoA info array.
 */
static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
				     const void *ptr, void *data)
{
	struct wmi_tlv_swba_parse *swba = data;
	int ret;

	switch (tag) {
	case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
		swba->ev = ptr;
		break;
	case WMI_TLV_TAG_ARRAY_STRUCT:
		if (!swba->tim_done) {
			swba->tim_done = true;
			ret = ath10k_wmi_tlv_iter(ar, ptr, len,
						  ath10k_wmi_tlv_swba_tim_parse,
						  swba);
			if (ret)
				return ret;
1167 } else if (!swba->noa_done) { 1168 swba->noa_done = true; 1169 ret = ath10k_wmi_tlv_iter(ar, ptr, len, 1170 ath10k_wmi_tlv_swba_noa_parse, 1171 swba); 1172 if (ret) 1173 return ret; 1174 } 1175 break; 1176 default: 1177 break; 1178 } 1179 return 0; 1180 } 1181 1182 static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar, 1183 struct sk_buff *skb, 1184 struct wmi_swba_ev_arg *arg) 1185 { 1186 struct wmi_tlv_swba_parse swba = { .arg = arg }; 1187 u32 map; 1188 size_t n_vdevs; 1189 int ret; 1190 1191 ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, 1192 ath10k_wmi_tlv_swba_parse, &swba); 1193 if (ret) { 1194 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 1195 return ret; 1196 } 1197 1198 if (!swba.ev) 1199 return -EPROTO; 1200 1201 arg->vdev_map = swba.ev->vdev_map; 1202 1203 for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1) 1204 if (map & BIT(0)) 1205 n_vdevs++; 1206 1207 if (n_vdevs != swba.n_tim || 1208 n_vdevs != swba.n_noa) 1209 return -EPROTO; 1210 1211 return 0; 1212 } 1213 1214 static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar, 1215 struct sk_buff *skb, 1216 struct wmi_phyerr_hdr_arg *arg) 1217 { 1218 const void **tb; 1219 const struct wmi_tlv_phyerr_ev *ev; 1220 const void *phyerrs; 1221 int ret; 1222 1223 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 1224 if (IS_ERR(tb)) { 1225 ret = PTR_ERR(tb); 1226 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 1227 return ret; 1228 } 1229 1230 ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR]; 1231 phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE]; 1232 1233 if (!ev || !phyerrs) { 1234 kfree(tb); 1235 return -EPROTO; 1236 } 1237 1238 arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs); 1239 arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32); 1240 arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32); 1241 arg->buf_len = __le32_to_cpu(ev->buf_len); 1242 arg->phyerrs = phyerrs; 1243 1244 kfree(tb); 1245 return 0; 1246 } 1247 1248 #define WMI_TLV_ABI_VER_NS0 0x5F414351 1249 #define 
WMI_TLV_ABI_VER_NS1 0x00004C4D 1250 #define WMI_TLV_ABI_VER_NS2 0x00000000 1251 #define WMI_TLV_ABI_VER_NS3 0x00000000 1252 1253 #define WMI_TLV_ABI_VER0_MAJOR 1 1254 #define WMI_TLV_ABI_VER0_MINOR 0 1255 #define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \ 1256 (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF)) 1257 #define WMI_TLV_ABI_VER1 53 1258 1259 static int 1260 ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len, 1261 const void *ptr, void *data) 1262 { 1263 struct wmi_svc_rdy_ev_arg *arg = data; 1264 int i; 1265 1266 if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ) 1267 return -EPROTO; 1268 1269 for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) { 1270 if (!arg->mem_reqs[i]) { 1271 arg->mem_reqs[i] = ptr; 1272 return 0; 1273 } 1274 } 1275 1276 return -ENOMEM; 1277 } 1278 1279 struct wmi_tlv_svc_rdy_parse { 1280 const struct hal_reg_capabilities *reg; 1281 const struct wmi_tlv_svc_rdy_ev *ev; 1282 const __le32 *svc_bmap; 1283 const struct wlan_host_mem_req *mem_reqs; 1284 bool svc_bmap_done; 1285 bool dbs_hw_mode_done; 1286 }; 1287 1288 static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len, 1289 const void *ptr, void *data) 1290 { 1291 struct wmi_tlv_svc_rdy_parse *svc_rdy = data; 1292 1293 switch (tag) { 1294 case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT: 1295 svc_rdy->ev = ptr; 1296 break; 1297 case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES: 1298 svc_rdy->reg = ptr; 1299 break; 1300 case WMI_TLV_TAG_ARRAY_STRUCT: 1301 svc_rdy->mem_reqs = ptr; 1302 break; 1303 case WMI_TLV_TAG_ARRAY_UINT32: 1304 if (!svc_rdy->svc_bmap_done) { 1305 svc_rdy->svc_bmap_done = true; 1306 svc_rdy->svc_bmap = ptr; 1307 } else if (!svc_rdy->dbs_hw_mode_done) { 1308 svc_rdy->dbs_hw_mode_done = true; 1309 } 1310 break; 1311 default: 1312 break; 1313 } 1314 return 0; 1315 } 1316 1317 static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar, 1318 struct sk_buff *skb, 1319 struct wmi_svc_rdy_ev_arg *arg) 1320 { 1321 const struct 
hal_reg_capabilities *reg; 1322 const struct wmi_tlv_svc_rdy_ev *ev; 1323 const __le32 *svc_bmap; 1324 const struct wlan_host_mem_req *mem_reqs; 1325 struct wmi_tlv_svc_rdy_parse svc_rdy = { }; 1326 int ret; 1327 1328 ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, 1329 ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy); 1330 if (ret) { 1331 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 1332 return ret; 1333 } 1334 1335 ev = svc_rdy.ev; 1336 reg = svc_rdy.reg; 1337 svc_bmap = svc_rdy.svc_bmap; 1338 mem_reqs = svc_rdy.mem_reqs; 1339 1340 if (!ev || !reg || !svc_bmap || !mem_reqs) 1341 return -EPROTO; 1342 1343 /* This is an internal ABI compatibility check for WMI TLV so check it 1344 * here instead of the generic WMI code. 1345 */ 1346 ath10k_dbg(ar, ATH10K_DBG_WMI, 1347 "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n", 1348 __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0, 1349 __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0, 1350 __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1, 1351 __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2, 1352 __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3); 1353 1354 if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 || 1355 __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 || 1356 __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 || 1357 __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 || 1358 __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) { 1359 return -EOPNOTSUPP; 1360 } 1361 1362 arg->min_tx_power = ev->hw_min_tx_power; 1363 arg->max_tx_power = ev->hw_max_tx_power; 1364 arg->ht_cap = ev->ht_cap_info; 1365 arg->vht_cap = ev->vht_cap_info; 1366 arg->vht_supp_mcs = ev->vht_supp_mcs; 1367 arg->sw_ver0 = ev->abi.abi_ver0; 1368 arg->sw_ver1 = ev->abi.abi_ver1; 1369 arg->fw_build = ev->fw_build_vers; 1370 arg->phy_capab = ev->phy_capability; 1371 arg->num_rf_chains = ev->num_rf_chains; 1372 arg->eeprom_rd = 
reg->eeprom_rd; 1373 arg->low_2ghz_chan = reg->low_2ghz_chan; 1374 arg->high_2ghz_chan = reg->high_2ghz_chan; 1375 arg->low_5ghz_chan = reg->low_5ghz_chan; 1376 arg->high_5ghz_chan = reg->high_5ghz_chan; 1377 arg->num_mem_reqs = ev->num_mem_reqs; 1378 arg->service_map = svc_bmap; 1379 arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap); 1380 arg->sys_cap_info = ev->sys_cap_info; 1381 1382 ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs), 1383 ath10k_wmi_tlv_parse_mem_reqs, arg); 1384 if (ret) { 1385 ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret); 1386 return ret; 1387 } 1388 1389 return 0; 1390 } 1391 1392 static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar, 1393 struct sk_buff *skb, 1394 struct wmi_rdy_ev_arg *arg) 1395 { 1396 const void **tb; 1397 const struct wmi_tlv_rdy_ev *ev; 1398 int ret; 1399 1400 tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC); 1401 if (IS_ERR(tb)) { 1402 ret = PTR_ERR(tb); 1403 ath10k_warn(ar, "failed to parse tlv: %d\n", ret); 1404 return ret; 1405 } 1406 1407 ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT]; 1408 if (!ev) { 1409 kfree(tb); 1410 return -EPROTO; 1411 } 1412 1413 arg->sw_version = ev->abi.abi_ver0; 1414 arg->abi_version = ev->abi.abi_ver1; 1415 arg->status = ev->status; 1416 arg->mac_addr = ev->mac_addr.addr; 1417 1418 kfree(tb); 1419 return 0; 1420 } 1421 1422 static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len, 1423 const void *ptr, void *data) 1424 { 1425 struct wmi_svc_avail_ev_arg *arg = data; 1426 1427 switch (tag) { 1428 case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT: 1429 arg->service_map_ext_valid = true; 1430 arg->service_map_ext_len = *(__le32 *)ptr; 1431 arg->service_map_ext = ptr + sizeof(__le32); 1432 return 0; 1433 default: 1434 break; 1435 } 1436 1437 return 0; 1438 } 1439 1440 static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar, 1441 struct sk_buff *skb, 1442 struct wmi_svc_avail_ev_arg *arg) 1443 { 1444 int ret; 1445 
1446 ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len, 1447 ath10k_wmi_tlv_svc_avail_parse, arg); 1448 1449 if (ret) { 1450 ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret); 1451 return ret; 1452 } 1453 1454 return 0; 1455 } 1456 1457 static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src, 1458 struct ath10k_fw_stats_vdev *dst) 1459 { 1460 int i; 1461 1462 dst->vdev_id = __le32_to_cpu(src->vdev_id); 1463 dst->beacon_snr = __le32_to_cpu(src->beacon_snr); 1464 dst->data_snr = __le32_to_cpu(src->data_snr); 1465 dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames); 1466 dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail); 1467 dst->num_rts_success = __le32_to_cpu(src->num_rts_success); 1468 dst->num_rx_err = __le32_to_cpu(src->num_rx_err); 1469 dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard); 1470 dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked); 1471 1472 for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++) 1473 dst->num_tx_frames[i] = 1474 __le32_to_cpu(src->num_tx_frames[i]); 1475 1476 for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++) 1477 dst->num_tx_frames_retries[i] = 1478 __le32_to_cpu(src->num_tx_frames_retries[i]); 1479 1480 for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++) 1481 dst->num_tx_frames_failures[i] = 1482 __le32_to_cpu(src->num_tx_frames_failures[i]); 1483 1484 for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++) 1485 dst->tx_rate_history[i] = 1486 __le32_to_cpu(src->tx_rate_history[i]); 1487 1488 for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++) 1489 dst->beacon_rssi_history[i] = 1490 __le32_to_cpu(src->beacon_rssi_history[i]); 1491 } 1492 1493 static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar, 1494 struct sk_buff *skb, 1495 struct ath10k_fw_stats *stats) 1496 { 1497 const void **tb; 1498 const struct wmi_tlv_stats_ev *ev; 1499 u32 num_peer_stats_extd; 1500 const void *data; 1501 u32 num_pdev_stats; 1502 u32 num_vdev_stats; 1503 u32 
	u32 num_peer_stats;
	u32 num_bcnflt_stats;
	u32 num_chan_stats;
	size_t data_len;
	u32 stats_id;
	int ret;
	int i;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
	data = tb[WMI_TLV_TAG_ARRAY_BYTE];

	if (!ev || !data) {
		kfree(tb);
		return -EPROTO;
	}

	data_len = ath10k_wmi_tlv_len(data);
	num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
	num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
	num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
	num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
	num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
	stats_id = __le32_to_cpu(ev->stats_id);
	num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
		   num_pdev_stats, num_vdev_stats, num_peer_stats,
		   num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);

	/* The blob holds pdev records first; each record is bounds-checked
	 * against the remaining blob length before it is consumed.
	 */
	for (i = 0; i < num_pdev_stats; i++) {
		const struct wmi_pdev_stats *src;
		struct ath10k_fw_stats_pdev *dst;

		src = data;
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc_obj(*dst, GFP_ATOMIC);
		if (!dst)
			continue;	/* skip this record on OOM */

		ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

	/* Then the vdev records. */
	for (i = 0; i < num_vdev_stats; i++) {
		const struct wmi_tlv_vdev_stats *src;
		struct ath10k_fw_stats_vdev *dst;

		src = data;
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc_obj(*dst, GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_tlv_pull_vdev_stats(src, dst);
		list_add_tail(&dst->list, &stats->vdevs);
	}

	/* Then the peer records, optionally followed by extended records. */
	for (i = 0; i < num_peer_stats; i++) {
		const struct wmi_10x_peer_stats *src;
		struct ath10k_fw_stats_peer *dst;

		src = data;
		if (data_len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		data += sizeof(*src);
		data_len -= sizeof(*src);

		dst = kzalloc_obj(*dst, GFP_ATOMIC);
		if (!dst)
			continue;

		ath10k_wmi_pull_peer_stats(&src->old, dst);
		dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);

		if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
			const struct wmi_tlv_peer_stats_extd *extd;
			unsigned long rx_duration_high;

			/* The extd array follows all fixed peer records:
			 * skip the remaining (num_peer_stats - i - 1) fixed
			 * records, then index the i-th extd record.
			 */
			extd = data + sizeof(*src) * (num_peer_stats - i - 1)
			       + sizeof(*extd) * i;

			dst->rx_duration = __le32_to_cpu(extd->rx_duration);
			rx_duration_high = __le32_to_cpu
						(extd->rx_duration_high);

			/* If the high word is flagged valid, merge its
			 * masked bits above the low 32-bit rx_duration.
			 */
			if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
				     &rx_duration_high)) {
				rx_duration_high =
					FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
						  rx_duration_high);
				dst->rx_duration |= (u64)rx_duration_high <<
						    WMI_TLV_PEER_RX_DURATION_SHIFT;
			}
		}

		list_add_tail(&dst->list, &stats->peers);
	}

	kfree(tb);
	return 0;
}

/* Pull a ROAM event (vdev id, roam reason, rssi) into @arg. */
static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
					  struct sk_buff *skb,
					  struct wmi_roam_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_roam_ev *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->vdev_id = ev->vdev_id;
	arg->reason = ev->reason;
	arg->rssi = ev->rssi;

	kfree(tb);
	return 0;
}

/* Pull a WoW wakeup event into @arg, converting fields to host order. */
static int
ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg)
{
	const void **tb;
	const struct wmi_tlv_wow_event_info *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->vdev_id = __le32_to_cpu(ev->vdev_id);
	arg->flag = __le32_to_cpu(ev->flag);
	arg->wake_reason = __le32_to_cpu(ev->wake_reason);
	arg->data_len = __le32_to_cpu(ev->data_len);

	kfree(tb);
	return 0;
}

/* Pull an ECHO event: the echoed value is kept little-endian in @arg. */
static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
					  struct sk_buff *skb,
					  struct wmi_echo_ev_arg *arg)
{
	const void **tb;
	const struct wmi_echo_event *ev;
	int ret;

	tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
	if (!ev) {
		kfree(tb);
		return -EPROTO;
	}

	arg->value = ev->value;

	kfree(tb);
	return 0;
}

/* Build a PDEV_SUSPEND command skb carrying suspend option @opt. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
{
	struct wmi_tlv_pdev_suspend *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->opt = __cpu_to_le32(opt);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
	return skb;
}

/* Build a PDEV_RESUME command skb (payload is a single reserved word). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
{
	struct wmi_tlv_resume_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->reserved = __cpu_to_le32(0);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
	return skb;
}

/* Build a PDEV_SET_REGDOMAIN command skb.
 * NOTE(review): the @dfs_reg parameter is accepted but not written into the
 * command payload here — confirm against the firmware interface.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
				  u16 rd, u16 rd2g, u16 rd5g,
				  u16 ctl2g, u16 ctl5g,
				  enum wmi_dfs_region dfs_reg)
{
	struct wmi_tlv_pdev_set_rd_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->regd = __cpu_to_le32(rd);
	cmd->regd_2ghz = __cpu_to_le32(rd2g);
	cmd->regd_5ghz = __cpu_to_le32(rd5g);
	cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
	cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
	return skb;
}

/* WMI-TLV firmware configures TX beamforming after association. */
static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
{
	return WMI_TXBF_CONF_AFTER_ASSOC;
}

/* Build a PDEV_SET_PARAM command skb for (@param_id, @param_value). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
				     u32 param_value)
{
	struct wmi_tlv_pdev_set_param_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
		   param_id, param_value);
	return skb;
}

/* Serialize all registered host memory chunks as a run of
 * [wmi_tlv][host_memory_chunk_tlv] pairs into @host_mem_chunks. The caller
 * must have sized the buffer for ar->wmi.num_mem_chunks entries.
 */
static void
ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
{
	struct host_memory_chunk_tlv *chunk;
	struct wmi_tlv *tlv;
	dma_addr_t paddr;
	int i;
	__le16 tlv_len, tlv_tag;

	/* Tag/len are identical for every chunk; compute them once. */
	tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
	tlv_len = __cpu_to_le16(sizeof(*chunk));
	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		tlv = host_mem_chunks;
		tlv->tag = tlv_tag;
		tlv->len = tlv_len;
		chunk = (void *)tlv->value;

		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);

		/* Firmware with extended addressing takes the upper 32 bits
		 * of the DMA address separately.
		 */
		if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
			     ar->wmi.svc_map)) {
			paddr = ar->wmi.mem_chunks[i].paddr;
			chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
		}

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
			   i,
			   ar->wmi.mem_chunks[i].len,
			   (unsigned long long)ar->wmi.mem_chunks[i].paddr,
			   ar->wmi.mem_chunks[i].req_id);

		host_mem_chunks += sizeof(*tlv);
		host_mem_chunks += sizeof(*chunk);
	}
}

/* Build the WMI INIT command: init header, resource config and the host
 * memory chunk array, each as its own TLV.
 */
static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
{
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	struct wmi_tlv_init_cmd *cmd;
	struct wmi_tlv_resource_config *cfg;
	void *chunks;
	size_t len, chunks_len;
	void *ptr;

	/* Total size: init cmd TLV + resource config TLV + an ARRAY_STRUCT
	 * TLV containing one [tlv][chunk] pair per registered mem chunk.
	 */
	chunks_len = ar->wmi.num_mem_chunks *
		     (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*cfg)) +
	      (sizeof(*tlv) + chunks_len);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
	tlv->len = __cpu_to_le16(sizeof(*cfg));
	cfg = (void *)tlv->value;
	ptr += sizeof(*tlv);
	ptr += sizeof(*cfg);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(chunks_len);
	chunks = (void *)tlv->value;

	ptr += sizeof(*tlv);
	ptr += chunks_len;

	/* ABI handshake values; must match what the service-ready check
	 * above expects back from firmware.
	 */
	cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
	cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
	cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
	cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
	cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
	cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
	cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);

	cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);

	/* Prefer hw_params limits when set, otherwise target defaults. */
	if (ar->hw_params.num_peers)
		cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
	else
		cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
	cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
	cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);

	if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
		cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	} else {
		cfg->num_offload_peers = __cpu_to_le32(0);
		cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
	}

	cfg->num_peer_keys = __cpu_to_le32(2);
	if (ar->hw_params.num_peers)
		cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
	else
		cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
	cfg->tx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_chain_mask = __cpu_to_le32(0x7);
	cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
	cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
	cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
	cfg->scan_max_pending_reqs = __cpu_to_le32(4);
	cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
	cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
	cfg->num_mcast_groups = __cpu_to_le32(0);
	cfg->num_mcast_table_elems = __cpu_to_le32(0);
	cfg->mcast2ucast_mode = __cpu_to_le32(0);
	cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
	cfg->dma_burst_size = __cpu_to_le32(0);
	cfg->mac_aggr_delim = __cpu_to_le32(0);
	cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
	cfg->vow_config = __cpu_to_le32(0);
	cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
	cfg->max_frag_entries = __cpu_to_le32(2);
	cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
	cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
	cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
	cfg->num_multicast_filter_entries = __cpu_to_le32(5);
	cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
	cfg->num_keep_alive_pattern = __cpu_to_le32(6);
	cfg->keep_alive_pattern_size = __cpu_to_le32(0);
	cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
	cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
	cfg->wmi_send_separate = __cpu_to_le32(0);
	cfg->num_ocb_vdevs = __cpu_to_le32(0);
	cfg->num_ocb_channels = __cpu_to_le32(0);
	cfg->num_ocb_schedules = __cpu_to_le32(0);
	cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
		cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);

	ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
	return skb;
}

/* Build a START_SCAN command: the common scan header TLV followed by the
 * channel list, SSID list, BSSID list and IE blob, each as its own TLV.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
				 const struct wmi_start_scan_arg *arg)
{
	struct wmi_tlv_start_scan_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, chan_len, ssid_len, bssid_len, ie_len;
	__le32 *chans;
	struct wmi_ssid *ssids;
	struct wmi_mac_addr *addrs;
	void *ptr;
	int i, ret;

	ret = ath10k_wmi_start_scan_verify(arg);
	if (ret)
		return ERR_PTR(ret);

	chan_len = arg->n_channels * sizeof(__le32);
	ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
	bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
	ie_len = roundup(arg->ie_len, 4);	/* IE TLV is 4-byte aligned */
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      sizeof(*tlv) + chan_len +
	      sizeof(*tlv) + ssid_len +
	      sizeof(*tlv) + bssid_len +
	      sizeof(*tlv) + ie_len;

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	ath10k_wmi_put_start_scan_common(&cmd->common, arg);
	cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
	cmd->num_channels = __cpu_to_le32(arg->n_channels);
	cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
	cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
	cmd->ie_len = __cpu_to_le32(arg->ie_len);
	cmd->num_probes = __cpu_to_le32(3);
	ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
	ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);

	/* FIXME: There are some scan flag inconsistencies across firmwares,
	 * e.g. WMI-TLV inverts the logic behind the following flag.
	 */
	cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Channel list as an ARRAY_UINT32 TLV. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(chan_len);
	chans = (void *)tlv->value;
	for (i = 0; i < arg->n_channels; i++)
		chans[i] = __cpu_to_le32(arg->channels[i]);

	ptr += sizeof(*tlv);
	ptr += chan_len;

	/* SSID list as an ARRAY_FIXED_STRUCT TLV. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
	tlv->len = __cpu_to_le16(ssid_len);
	ssids = (void *)tlv->value;
	for (i = 0; i < arg->n_ssids; i++) {
		ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
		memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
	}

	ptr += sizeof(*tlv);
	ptr += ssid_len;

	/* BSSID list as an ARRAY_FIXED_STRUCT TLV. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
	tlv->len = __cpu_to_le16(bssid_len);
	addrs = (void *)tlv->value;
	for (i = 0; i < arg->n_bssids; i++)
		ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);

	ptr += sizeof(*tlv);
	ptr += bssid_len;

	/* Extra IEs as an ARRAY_BYTE TLV (length padded, copy unpadded). */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(ie_len);
	memcpy(tlv->value, arg->ie, arg->ie_len);

	ptr += sizeof(*tlv);
	ptr += ie_len;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
	return skb;
}

/* Build a STOP_SCAN command skb. Scan and requestor ids are limited to
 * 12 bits and get the host prefixes OR'ed in.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
				const struct wmi_stop_scan_arg *arg)
{
	struct wmi_stop_scan_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 scan_id;
	u32 req_id;

	if (arg->req_id > 0xFFF)
		return ERR_PTR(-EINVAL);
	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
		return ERR_PTR(-EINVAL);

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	scan_id = arg->u.scan_id;
	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;

	req_id = arg->req_id;
	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->req_type = __cpu_to_le32(arg->req_type);
	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(req_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
	return skb;
}

/* Map the generic WMI vdev subtype onto the WMI-TLV encoding.
 * Returns the TLV subtype or -EOPNOTSUPP (e.g. non-11s mesh).
 */
static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
					      enum wmi_vdev_subtype subtype)
{
	switch (subtype) {
	case WMI_VDEV_SUBTYPE_NONE:
		return WMI_TLV_VDEV_SUBTYPE_NONE;
	case WMI_VDEV_SUBTYPE_P2P_DEVICE:
		return WMI_TLV_VDEV_SUBTYPE_P2P_DEV;
	case WMI_VDEV_SUBTYPE_P2P_CLIENT:
		return WMI_TLV_VDEV_SUBTYPE_P2P_CLI;
	case WMI_VDEV_SUBTYPE_P2P_GO:
		return WMI_TLV_VDEV_SUBTYPE_P2P_GO;
	case WMI_VDEV_SUBTYPE_PROXY_STA:
		return WMI_TLV_VDEV_SUBTYPE_PROXY_STA;
	case WMI_VDEV_SUBTYPE_MESH_11S:
		return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
	case WMI_VDEV_SUBTYPE_MESH_NON_11S:
		return -EOPNOTSUPP;
	}
	return -EOPNOTSUPP;
}

/* Build a VDEV_CREATE command skb for the given id/type/subtype/MAC. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
				  u32 vdev_id,
				  enum wmi_vdev_type vdev_type,
				  enum wmi_vdev_subtype vdev_subtype,
				  const u8 mac_addr[ETH_ALEN])
{
	struct wmi_vdev_create_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_type = __cpu_to_le32(vdev_type);
	cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
	ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
	return skb;
}

/* Build a VDEV_DELETE command skb for @vdev_id. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_delete_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
	return skb;
}

/* Build a VDEV_START (or restart) command: start request TLV, channel TLV
 * and an (empty) nested NoA descriptor array TLV.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
				 const struct wmi_vdev_start_request_arg *arg,
				 bool restart)
{
	struct wmi_tlv_vdev_start_cmd *cmd;
	struct wmi_channel *ch;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;
	u32 flags = 0;

	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return ERR_PTR(-EINVAL);
	len = (sizeof(*tlv) + sizeof(*cmd)) +
	      (sizeof(*tlv) + sizeof(*ch)) +
	      (sizeof(*tlv) + 0);	/* empty NoA descriptor array */
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	ptr = (void *)skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
	cmd->flags = __cpu_to_le32(flags);
	cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
	cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
	tlv->len = __cpu_to_le16(sizeof(*ch));
	ch = (void *)tlv->value;
	ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);

	ptr += sizeof(*tlv);
	ptr += sizeof(*ch);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = 0;

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

	ptr += sizeof(*tlv);
	ptr += 0;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
	return skb;
}

/* Build a VDEV_STOP command skb for @vdev_id. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_stop_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
	return skb;
}

/* Build a VDEV_UP command skb (vdev id, association id, BSSID). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
			      const u8 *bssid)

{
	struct wmi_vdev_up_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_assoc_id = __cpu_to_le32(aid);
	ether_addr_copy(cmd->vdev_bssid.addr, bssid);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
	return skb;
}

/* Build a VDEV_DOWN command skb for @vdev_id. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_down_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
	return skb;
}

/* Build a VDEV_SET_PARAM command skb for (@vdev_id, @param_id, value). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
				     u32 param_id, u32 param_value)
{
	struct wmi_vdev_set_param_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
		   vdev_id, param_id, param_value);
	return skb;
}

/* Build a VDEV_INSTALL_KEY command: key cmd TLV plus the key material as a
 * 4-byte aligned byte array TLV. Key data must be present exactly when the
 * cipher is not NONE.
 * NOTE(review): this function continues beyond the end of this view.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
				       const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;
	void *ptr;

	if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
	    arg->key_data)
		return ERR_PTR(-EINVAL);
	if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
	    !arg->key_data)
		return ERR_PTR(-EINVAL);

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx = __cpu_to_le32(arg->key_idx);
cmd->key_flags = __cpu_to_le32(arg->key_flags); 2384 cmd->key_cipher = __cpu_to_le32(arg->key_cipher); 2385 cmd->key_len = __cpu_to_le32(arg->key_len); 2386 cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len); 2387 cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len); 2388 2389 if (arg->macaddr) 2390 ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr); 2391 2392 ptr += sizeof(*tlv); 2393 ptr += sizeof(*cmd); 2394 2395 tlv = ptr; 2396 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); 2397 tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32))); 2398 if (arg->key_data) 2399 memcpy(tlv->value, arg->key_data, arg->key_len); 2400 2401 ptr += sizeof(*tlv); 2402 ptr += roundup(arg->key_len, sizeof(__le32)); 2403 2404 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n"); 2405 return skb; 2406 } 2407 2408 static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr, 2409 const struct wmi_sta_uapsd_auto_trig_arg *arg) 2410 { 2411 struct wmi_sta_uapsd_auto_trig_param *ac; 2412 struct wmi_tlv *tlv; 2413 2414 tlv = ptr; 2415 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM); 2416 tlv->len = __cpu_to_le16(sizeof(*ac)); 2417 ac = (void *)tlv->value; 2418 2419 ac->wmm_ac = __cpu_to_le32(arg->wmm_ac); 2420 ac->user_priority = __cpu_to_le32(arg->user_priority); 2421 ac->service_interval = __cpu_to_le32(arg->service_interval); 2422 ac->suspend_interval = __cpu_to_le32(arg->suspend_interval); 2423 ac->delay_interval = __cpu_to_le32(arg->delay_interval); 2424 2425 ath10k_dbg(ar, ATH10K_DBG_WMI, 2426 "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n", 2427 ac->wmm_ac, ac->user_priority, ac->service_interval, 2428 ac->suspend_interval, ac->delay_interval); 2429 2430 return ptr + sizeof(*tlv) + sizeof(*ac); 2431 } 2432 2433 static struct sk_buff * 2434 ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id, 2435 const u8 peer_addr[ETH_ALEN], 2436 const struct 
wmi_sta_uapsd_auto_trig_arg *args, 2437 u32 num_ac) 2438 { 2439 struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd; 2440 struct wmi_sta_uapsd_auto_trig_param *ac; 2441 struct wmi_tlv *tlv; 2442 struct sk_buff *skb; 2443 size_t len; 2444 size_t ac_tlv_len; 2445 void *ptr; 2446 int i; 2447 2448 ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac)); 2449 len = sizeof(*tlv) + sizeof(*cmd) + 2450 sizeof(*tlv) + ac_tlv_len; 2451 skb = ath10k_wmi_alloc_skb(ar, len); 2452 if (!skb) 2453 return ERR_PTR(-ENOMEM); 2454 2455 ptr = (void *)skb->data; 2456 tlv = ptr; 2457 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD); 2458 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2459 cmd = (void *)tlv->value; 2460 cmd->vdev_id = __cpu_to_le32(vdev_id); 2461 cmd->num_ac = __cpu_to_le32(num_ac); 2462 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 2463 2464 ptr += sizeof(*tlv); 2465 ptr += sizeof(*cmd); 2466 2467 tlv = ptr; 2468 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 2469 tlv->len = __cpu_to_le16(ac_tlv_len); 2470 ac = (void *)tlv->value; 2471 2472 ptr += sizeof(*tlv); 2473 for (i = 0; i < num_ac; i++) 2474 ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]); 2475 2476 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n"); 2477 return skb; 2478 } 2479 2480 static void *ath10k_wmi_tlv_put_wmm(void *ptr, 2481 const struct wmi_wmm_params_arg *arg) 2482 { 2483 struct wmi_wmm_params *wmm; 2484 struct wmi_tlv *tlv; 2485 2486 tlv = ptr; 2487 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS); 2488 tlv->len = __cpu_to_le16(sizeof(*wmm)); 2489 wmm = (void *)tlv->value; 2490 ath10k_wmi_set_wmm_param(wmm, arg); 2491 2492 return ptr + sizeof(*tlv) + sizeof(*wmm); 2493 } 2494 2495 static struct sk_buff * 2496 ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id, 2497 const struct wmi_wmm_params_all_arg *arg) 2498 { 2499 struct wmi_tlv_vdev_set_wmm_cmd *cmd; 2500 struct wmi_tlv *tlv; 2501 struct sk_buff *skb; 2502 size_t len; 2503 void 
*ptr; 2504 2505 len = sizeof(*tlv) + sizeof(*cmd); 2506 skb = ath10k_wmi_alloc_skb(ar, len); 2507 if (!skb) 2508 return ERR_PTR(-ENOMEM); 2509 2510 ptr = (void *)skb->data; 2511 tlv = ptr; 2512 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD); 2513 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2514 cmd = (void *)tlv->value; 2515 cmd->vdev_id = __cpu_to_le32(vdev_id); 2516 2517 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be); 2518 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk); 2519 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi); 2520 ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo); 2521 2522 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n"); 2523 return skb; 2524 } 2525 2526 static struct sk_buff * 2527 ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar, 2528 const struct wmi_sta_keepalive_arg *arg) 2529 { 2530 struct wmi_tlv_sta_keepalive_cmd *cmd; 2531 struct wmi_sta_keepalive_arp_resp *arp; 2532 struct sk_buff *skb; 2533 struct wmi_tlv *tlv; 2534 void *ptr; 2535 size_t len; 2536 2537 len = sizeof(*tlv) + sizeof(*cmd) + 2538 sizeof(*tlv) + sizeof(*arp); 2539 skb = ath10k_wmi_alloc_skb(ar, len); 2540 if (!skb) 2541 return ERR_PTR(-ENOMEM); 2542 2543 ptr = (void *)skb->data; 2544 tlv = ptr; 2545 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD); 2546 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2547 cmd = (void *)tlv->value; 2548 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 2549 cmd->enabled = __cpu_to_le32(arg->enabled); 2550 cmd->method = __cpu_to_le32(arg->method); 2551 cmd->interval = __cpu_to_le32(arg->interval); 2552 2553 ptr += sizeof(*tlv); 2554 ptr += sizeof(*cmd); 2555 2556 tlv = ptr; 2557 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE); 2558 tlv->len = __cpu_to_le16(sizeof(*arp)); 2559 arp = (void *)tlv->value; 2560 2561 arp->src_ip4_addr = arg->src_ip4_addr; 2562 arp->dest_ip4_addr = arg->dest_ip4_addr; 
2563 ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); 2564 2565 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n", 2566 arg->vdev_id, arg->enabled, arg->method, arg->interval); 2567 return skb; 2568 } 2569 2570 static struct sk_buff * 2571 ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, 2572 const u8 peer_addr[ETH_ALEN], 2573 enum wmi_peer_type peer_type) 2574 { 2575 struct wmi_tlv_peer_create_cmd *cmd; 2576 struct wmi_tlv *tlv; 2577 struct sk_buff *skb; 2578 2579 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2580 if (!skb) 2581 return ERR_PTR(-ENOMEM); 2582 2583 tlv = (void *)skb->data; 2584 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD); 2585 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2586 cmd = (void *)tlv->value; 2587 cmd->vdev_id = __cpu_to_le32(vdev_id); 2588 cmd->peer_type = __cpu_to_le32(peer_type); 2589 ether_addr_copy(cmd->peer_addr.addr, peer_addr); 2590 2591 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n"); 2592 return skb; 2593 } 2594 2595 static struct sk_buff * 2596 ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id, 2597 const u8 peer_addr[ETH_ALEN]) 2598 { 2599 struct wmi_peer_delete_cmd *cmd; 2600 struct wmi_tlv *tlv; 2601 struct sk_buff *skb; 2602 2603 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2604 if (!skb) 2605 return ERR_PTR(-ENOMEM); 2606 2607 tlv = (void *)skb->data; 2608 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD); 2609 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2610 cmd = (void *)tlv->value; 2611 cmd->vdev_id = __cpu_to_le32(vdev_id); 2612 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 2613 2614 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n"); 2615 return skb; 2616 } 2617 2618 static struct sk_buff * 2619 ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id, 2620 const u8 peer_addr[ETH_ALEN], u32 tid_bitmap) 2621 { 2622 struct wmi_peer_flush_tids_cmd *cmd; 
2623 struct wmi_tlv *tlv; 2624 struct sk_buff *skb; 2625 2626 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2627 if (!skb) 2628 return ERR_PTR(-ENOMEM); 2629 2630 tlv = (void *)skb->data; 2631 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD); 2632 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2633 cmd = (void *)tlv->value; 2634 cmd->vdev_id = __cpu_to_le32(vdev_id); 2635 cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap); 2636 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 2637 2638 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n"); 2639 return skb; 2640 } 2641 2642 static struct sk_buff * 2643 ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id, 2644 const u8 *peer_addr, 2645 enum wmi_peer_param param_id, 2646 u32 param_value) 2647 { 2648 struct wmi_peer_set_param_cmd *cmd; 2649 struct wmi_tlv *tlv; 2650 struct sk_buff *skb; 2651 2652 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2653 if (!skb) 2654 return ERR_PTR(-ENOMEM); 2655 2656 tlv = (void *)skb->data; 2657 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD); 2658 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2659 cmd = (void *)tlv->value; 2660 cmd->vdev_id = __cpu_to_le32(vdev_id); 2661 cmd->param_id = __cpu_to_le32(param_id); 2662 cmd->param_value = __cpu_to_le32(param_value); 2663 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 2664 2665 ath10k_dbg(ar, ATH10K_DBG_WMI, 2666 "wmi tlv vdev %d peer %pM set param %d value 0x%x\n", 2667 vdev_id, peer_addr, param_id, param_value); 2668 return skb; 2669 } 2670 2671 static struct sk_buff * 2672 ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar, 2673 const struct wmi_peer_assoc_complete_arg *arg) 2674 { 2675 struct wmi_tlv_peer_assoc_cmd *cmd; 2676 struct wmi_vht_rate_set *vht_rate; 2677 struct wmi_tlv *tlv; 2678 struct sk_buff *skb; 2679 size_t len, legacy_rate_len, ht_rate_len; 2680 void *ptr; 2681 2682 if (arg->peer_mpdu_density > 16) 2683 return ERR_PTR(-EINVAL); 2684 if 
(arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES) 2685 return ERR_PTR(-EINVAL); 2686 if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES) 2687 return ERR_PTR(-EINVAL); 2688 2689 legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates, 2690 sizeof(__le32)); 2691 ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32)); 2692 len = (sizeof(*tlv) + sizeof(*cmd)) + 2693 (sizeof(*tlv) + legacy_rate_len) + 2694 (sizeof(*tlv) + ht_rate_len) + 2695 (sizeof(*tlv) + sizeof(*vht_rate)); 2696 skb = ath10k_wmi_alloc_skb(ar, len); 2697 if (!skb) 2698 return ERR_PTR(-ENOMEM); 2699 2700 ptr = (void *)skb->data; 2701 tlv = ptr; 2702 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD); 2703 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2704 cmd = (void *)tlv->value; 2705 2706 cmd->vdev_id = __cpu_to_le32(arg->vdev_id); 2707 cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1); 2708 cmd->assoc_id = __cpu_to_le32(arg->peer_aid); 2709 cmd->flags = __cpu_to_le32(arg->peer_flags); 2710 cmd->caps = __cpu_to_le32(arg->peer_caps); 2711 cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval); 2712 cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps); 2713 cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu); 2714 cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density); 2715 cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps); 2716 cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams); 2717 cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps); 2718 cmd->phy_mode = __cpu_to_le32(arg->peer_phymode); 2719 cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates); 2720 cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates); 2721 ether_addr_copy(cmd->mac_addr.addr, arg->addr); 2722 2723 ptr += sizeof(*tlv); 2724 ptr += sizeof(*cmd); 2725 2726 tlv = ptr; 2727 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); 2728 tlv->len = __cpu_to_le16(legacy_rate_len); 2729 memcpy(tlv->value, arg->peer_legacy_rates.rates, 2730 
arg->peer_legacy_rates.num_rates); 2731 2732 ptr += sizeof(*tlv); 2733 ptr += legacy_rate_len; 2734 2735 tlv = ptr; 2736 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); 2737 tlv->len = __cpu_to_le16(ht_rate_len); 2738 memcpy(tlv->value, arg->peer_ht_rates.rates, 2739 arg->peer_ht_rates.num_rates); 2740 2741 ptr += sizeof(*tlv); 2742 ptr += ht_rate_len; 2743 2744 tlv = ptr; 2745 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET); 2746 tlv->len = __cpu_to_le16(sizeof(*vht_rate)); 2747 vht_rate = (void *)tlv->value; 2748 2749 vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate); 2750 vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set); 2751 vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate); 2752 vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set); 2753 2754 ptr += sizeof(*tlv); 2755 ptr += sizeof(*vht_rate); 2756 2757 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n"); 2758 return skb; 2759 } 2760 2761 static struct sk_buff * 2762 ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id, 2763 enum wmi_sta_ps_mode psmode) 2764 { 2765 struct wmi_sta_powersave_mode_cmd *cmd; 2766 struct wmi_tlv *tlv; 2767 struct sk_buff *skb; 2768 2769 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2770 if (!skb) 2771 return ERR_PTR(-ENOMEM); 2772 2773 tlv = (void *)skb->data; 2774 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD); 2775 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2776 cmd = (void *)tlv->value; 2777 cmd->vdev_id = __cpu_to_le32(vdev_id); 2778 cmd->sta_ps_mode = __cpu_to_le32(psmode); 2779 2780 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n"); 2781 return skb; 2782 } 2783 2784 static struct sk_buff * 2785 ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id, 2786 enum wmi_sta_powersave_param param_id, 2787 u32 param_value) 2788 { 2789 struct wmi_sta_powersave_param_cmd *cmd; 2790 struct wmi_tlv *tlv; 2791 struct sk_buff *skb; 2792 2793 
skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2794 if (!skb) 2795 return ERR_PTR(-ENOMEM); 2796 2797 tlv = (void *)skb->data; 2798 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD); 2799 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2800 cmd = (void *)tlv->value; 2801 cmd->vdev_id = __cpu_to_le32(vdev_id); 2802 cmd->param_id = __cpu_to_le32(param_id); 2803 cmd->param_value = __cpu_to_le32(param_value); 2804 2805 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n"); 2806 return skb; 2807 } 2808 2809 static struct sk_buff * 2810 ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac, 2811 enum wmi_ap_ps_peer_param param_id, u32 value) 2812 { 2813 struct wmi_ap_ps_peer_cmd *cmd; 2814 struct wmi_tlv *tlv; 2815 struct sk_buff *skb; 2816 2817 if (!mac) 2818 return ERR_PTR(-EINVAL); 2819 2820 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2821 if (!skb) 2822 return ERR_PTR(-ENOMEM); 2823 2824 tlv = (void *)skb->data; 2825 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD); 2826 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2827 cmd = (void *)tlv->value; 2828 cmd->vdev_id = __cpu_to_le32(vdev_id); 2829 cmd->param_id = __cpu_to_le32(param_id); 2830 cmd->param_value = __cpu_to_le32(value); 2831 ether_addr_copy(cmd->peer_macaddr.addr, mac); 2832 2833 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n"); 2834 return skb; 2835 } 2836 2837 static struct sk_buff * 2838 ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar, 2839 const struct wmi_scan_chan_list_arg *arg) 2840 { 2841 struct wmi_tlv_scan_chan_list_cmd *cmd; 2842 struct wmi_channel *ci; 2843 struct wmi_channel_arg *ch; 2844 struct wmi_tlv *tlv; 2845 struct sk_buff *skb; 2846 size_t chans_len, len; 2847 int i; 2848 void *ptr, *chans; 2849 2850 chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci)); 2851 len = (sizeof(*tlv) + sizeof(*cmd)) + 2852 (sizeof(*tlv) + chans_len); 2853 2854 skb = ath10k_wmi_alloc_skb(ar, len); 2855 if (!skb) 2856 
return ERR_PTR(-ENOMEM); 2857 2858 ptr = (void *)skb->data; 2859 tlv = ptr; 2860 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD); 2861 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2862 cmd = (void *)tlv->value; 2863 cmd->num_scan_chans = __cpu_to_le32(arg->n_channels); 2864 2865 ptr += sizeof(*tlv); 2866 ptr += sizeof(*cmd); 2867 2868 tlv = ptr; 2869 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); 2870 tlv->len = __cpu_to_le16(chans_len); 2871 chans = (void *)tlv->value; 2872 2873 for (i = 0; i < arg->n_channels; i++) { 2874 ch = &arg->channels[i]; 2875 2876 tlv = chans; 2877 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL); 2878 tlv->len = __cpu_to_le16(sizeof(*ci)); 2879 ci = (void *)tlv->value; 2880 2881 ath10k_wmi_put_wmi_channel(ar, ci, ch); 2882 2883 chans += sizeof(*tlv); 2884 chans += sizeof(*ci); 2885 } 2886 2887 ptr += sizeof(*tlv); 2888 ptr += chans_len; 2889 2890 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n"); 2891 return skb; 2892 } 2893 2894 static struct sk_buff * 2895 ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui) 2896 { 2897 struct wmi_scan_prob_req_oui_cmd *cmd; 2898 struct wmi_tlv *tlv; 2899 struct sk_buff *skb; 2900 2901 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2902 if (!skb) 2903 return ERR_PTR(-ENOMEM); 2904 2905 tlv = (void *)skb->data; 2906 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD); 2907 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2908 cmd = (void *)tlv->value; 2909 cmd->prob_req_oui = __cpu_to_le32(prob_req_oui); 2910 2911 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n"); 2912 return skb; 2913 } 2914 2915 static struct sk_buff * 2916 ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, 2917 const void *bcn, size_t bcn_len, 2918 u32 bcn_paddr, bool dtim_zero, 2919 bool deliver_cab) 2920 2921 { 2922 struct wmi_bcn_tx_ref_cmd *cmd; 2923 struct wmi_tlv *tlv; 2924 struct sk_buff *skb; 2925 struct ieee80211_hdr *hdr; 2926 
u16 fc; 2927 2928 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 2929 if (!skb) 2930 return ERR_PTR(-ENOMEM); 2931 2932 hdr = (struct ieee80211_hdr *)bcn; 2933 fc = le16_to_cpu(hdr->frame_control); 2934 2935 tlv = (void *)skb->data; 2936 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD); 2937 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2938 cmd = (void *)tlv->value; 2939 cmd->vdev_id = __cpu_to_le32(vdev_id); 2940 cmd->data_len = __cpu_to_le32(bcn_len); 2941 cmd->data_ptr = __cpu_to_le32(bcn_paddr); 2942 cmd->msdu_id = 0; 2943 cmd->frame_control = __cpu_to_le32(fc); 2944 cmd->flags = 0; 2945 2946 if (dtim_zero) 2947 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO); 2948 2949 if (deliver_cab) 2950 cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB); 2951 2952 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n"); 2953 return skb; 2954 } 2955 2956 static struct sk_buff * 2957 ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar, 2958 const struct wmi_wmm_params_all_arg *arg) 2959 { 2960 struct wmi_tlv_pdev_set_wmm_cmd *cmd; 2961 struct wmi_wmm_params *wmm; 2962 struct wmi_tlv *tlv; 2963 struct sk_buff *skb; 2964 size_t len; 2965 void *ptr; 2966 2967 len = (sizeof(*tlv) + sizeof(*cmd)) + 2968 (4 * (sizeof(*tlv) + sizeof(*wmm))); 2969 skb = ath10k_wmi_alloc_skb(ar, len); 2970 if (!skb) 2971 return ERR_PTR(-ENOMEM); 2972 2973 ptr = (void *)skb->data; 2974 2975 tlv = ptr; 2976 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD); 2977 tlv->len = __cpu_to_le16(sizeof(*cmd)); 2978 cmd = (void *)tlv->value; 2979 2980 /* nothing to set here */ 2981 2982 ptr += sizeof(*tlv); 2983 ptr += sizeof(*cmd); 2984 2985 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be); 2986 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk); 2987 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi); 2988 ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo); 2989 2990 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n"); 2991 return skb; 2992 } 2993 2994 
static struct sk_buff * 2995 ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask) 2996 { 2997 struct wmi_request_stats_cmd *cmd; 2998 struct wmi_tlv *tlv; 2999 struct sk_buff *skb; 3000 3001 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 3002 if (!skb) 3003 return ERR_PTR(-ENOMEM); 3004 3005 tlv = (void *)skb->data; 3006 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD); 3007 tlv->len = __cpu_to_le16(sizeof(*cmd)); 3008 cmd = (void *)tlv->value; 3009 cmd->stats_id = __cpu_to_le32(stats_mask); 3010 3011 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n"); 3012 return skb; 3013 } 3014 3015 static struct sk_buff * 3016 ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar, 3017 u32 vdev_id, 3018 enum wmi_peer_stats_info_request_type type, 3019 u8 *addr, 3020 u32 reset) 3021 { 3022 struct wmi_tlv_request_peer_stats_info *cmd; 3023 struct wmi_tlv *tlv; 3024 struct sk_buff *skb; 3025 3026 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 3027 if (!skb) 3028 return ERR_PTR(-ENOMEM); 3029 3030 tlv = (void *)skb->data; 3031 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD); 3032 tlv->len = __cpu_to_le16(sizeof(*cmd)); 3033 cmd = (void *)tlv->value; 3034 cmd->vdev_id = __cpu_to_le32(vdev_id); 3035 cmd->request_type = __cpu_to_le32(type); 3036 3037 if (type == WMI_REQUEST_ONE_PEER_STATS_INFO) 3038 ether_addr_copy(cmd->peer_macaddr.addr, addr); 3039 3040 cmd->reset_after_request = __cpu_to_le32(reset); 3041 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n"); 3042 return skb; 3043 } 3044 3045 static int 3046 ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar, 3047 struct sk_buff *msdu) 3048 { 3049 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); 3050 struct ath10k_mgmt_tx_pkt_addr *pkt_addr; 3051 struct ath10k_wmi *wmi = &ar->wmi; 3052 3053 spin_lock_bh(&ar->data_lock); 3054 pkt_addr = idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id); 3055 
spin_unlock_bh(&ar->data_lock); 3056 3057 kfree(pkt_addr); 3058 3059 return 0; 3060 } 3061 3062 static int 3063 ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb, 3064 dma_addr_t paddr) 3065 { 3066 struct ath10k_wmi *wmi = &ar->wmi; 3067 struct ath10k_mgmt_tx_pkt_addr *pkt_addr; 3068 int ret; 3069 3070 pkt_addr = kmalloc_obj(*pkt_addr, GFP_ATOMIC); 3071 if (!pkt_addr) 3072 return -ENOMEM; 3073 3074 pkt_addr->vaddr = skb; 3075 pkt_addr->paddr = paddr; 3076 3077 spin_lock_bh(&ar->data_lock); 3078 ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0, 3079 wmi->mgmt_max_num_pending_tx, GFP_ATOMIC); 3080 spin_unlock_bh(&ar->data_lock); 3081 3082 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret); 3083 return ret; 3084 } 3085 3086 static struct sk_buff * 3087 ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu, 3088 dma_addr_t paddr) 3089 { 3090 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); 3091 struct wmi_tlv_mgmt_tx_cmd *cmd; 3092 struct ieee80211_hdr *hdr; 3093 struct ath10k_vif *arvif; 3094 u32 buf_len = msdu->len; 3095 struct wmi_tlv *tlv; 3096 struct sk_buff *skb; 3097 int len, desc_id; 3098 u32 vdev_id; 3099 void *ptr; 3100 3101 if (!cb->vif) 3102 return ERR_PTR(-EINVAL); 3103 3104 hdr = (struct ieee80211_hdr *)msdu->data; 3105 arvif = (void *)cb->vif->drv_priv; 3106 vdev_id = arvif->vdev_id; 3107 3108 if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) && 3109 (!(ieee80211_is_nullfunc(hdr->frame_control) || 3110 ieee80211_is_qos_nullfunc(hdr->frame_control))))) 3111 return ERR_PTR(-EINVAL); 3112 3113 len = sizeof(*cmd) + 2 * sizeof(*tlv); 3114 3115 if ((ieee80211_is_action(hdr->frame_control) || 3116 ieee80211_is_deauth(hdr->frame_control) || 3117 ieee80211_is_disassoc(hdr->frame_control)) && 3118 ieee80211_has_protected(hdr->frame_control)) { 3119 skb_put(msdu, IEEE80211_CCMP_MIC_LEN); 3120 buf_len += IEEE80211_CCMP_MIC_LEN; 3121 } 3122 3123 buf_len = min_t(u32, buf_len, 
WMI_TLV_MGMT_TX_FRAME_MAX_LEN); 3124 buf_len = round_up(buf_len, 4); 3125 3126 len += buf_len; 3127 len = round_up(len, 4); 3128 skb = ath10k_wmi_alloc_skb(ar, len); 3129 if (!skb) 3130 return ERR_PTR(-ENOMEM); 3131 3132 desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr); 3133 if (desc_id < 0) 3134 goto err_free_skb; 3135 3136 cb->msdu_id = desc_id; 3137 3138 ptr = (void *)skb->data; 3139 tlv = ptr; 3140 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD); 3141 tlv->len = __cpu_to_le16(sizeof(*cmd)); 3142 cmd = (void *)tlv->value; 3143 cmd->vdev_id = __cpu_to_le32(vdev_id); 3144 cmd->desc_id = __cpu_to_le32(desc_id); 3145 cmd->chanfreq = 0; 3146 cmd->buf_len = __cpu_to_le32(buf_len); 3147 cmd->frame_len = __cpu_to_le32(msdu->len); 3148 cmd->paddr = __cpu_to_le64(paddr); 3149 3150 ptr += sizeof(*tlv); 3151 ptr += sizeof(*cmd); 3152 3153 tlv = ptr; 3154 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE); 3155 tlv->len = __cpu_to_le16(buf_len); 3156 3157 ptr += sizeof(*tlv); 3158 memcpy(ptr, msdu->data, buf_len); 3159 3160 return skb; 3161 3162 err_free_skb: 3163 dev_kfree_skb(skb); 3164 return ERR_PTR(desc_id); 3165 } 3166 3167 static struct sk_buff * 3168 ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar, 3169 enum wmi_force_fw_hang_type type, 3170 u32 delay_ms) 3171 { 3172 struct wmi_force_fw_hang_cmd *cmd; 3173 struct wmi_tlv *tlv; 3174 struct sk_buff *skb; 3175 3176 skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd)); 3177 if (!skb) 3178 return ERR_PTR(-ENOMEM); 3179 3180 tlv = (void *)skb->data; 3181 tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD); 3182 tlv->len = __cpu_to_le16(sizeof(*cmd)); 3183 cmd = (void *)tlv->value; 3184 cmd->type = __cpu_to_le32(type); 3185 cmd->delay_ms = __cpu_to_le32(delay_ms); 3186 3187 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n"); 3188 return skb; 3189 } 3190 3191 static struct sk_buff * 3192 ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable, 3193 u32 log_level) 
{
	struct wmi_tlv_dbglog_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len, bmap_len;
	u32 value;
	void *ptr;

	/* If specific modules were requested, enable verbose logging for
	 * them; otherwise fall back to warning level for all modules.
	 */
	if (module_enable) {
		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
				module_enable,
				WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
	} else {
		value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
				WMI_TLV_DBGLOG_ALL_MODULES,
				WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
	}

	/* The trailing module bitmap array TLV is sent empty. */
	bmap_len = 0;
	len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
	cmd->value = __cpu_to_le32(value);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Empty module bitmap array TLV (bmap_len == 0). */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(bmap_len);

	/* nothing to do here */

	ptr += sizeof(*tlv);
	/* NOTE(review): this advances by sizeof(size_t) rather than by
	 * bmap_len; harmless because ptr is not used again, but it looks
	 * unintended — confirm.
	 */
	ptr += sizeof(bmap_len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
	return skb;
}

/* Build a pdev packet log enable command carrying the rx filter bitmap. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct wmi_tlv_pktlog_enable *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->filter = __cpu_to_le32(filter);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
		   filter);
	return skb;
}

/* Build a pdev get-temperature request; the command struct carries no
 * fields, so the TLV payload is left zeroed.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar)
{
	struct wmi_tlv_pdev_get_temp_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n");
	return skb;
}

/* Build a pdev packet log disable command (no payload fields). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
{
	struct wmi_tlv_pktlog_disable *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
	return skb;
}

/* Build a beacon template command: cmd TLV, probe response info TLV
 * (caps/erp plus optional IEs), then the beacon frame itself padded to a
 * 4-byte boundary.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
			       u32 tim_ie_offset, struct sk_buff *bcn,
			       u32 prb_caps, u32 prb_erp, void *prb_ies,
			       size_t prb_ies_len)
{
	struct wmi_tlv_bcn_tmpl_cmd *cmd;
	struct wmi_tlv_bcn_prb_info *info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	/* A non-zero IE length with no IE buffer is a caller bug. */
	if (WARN_ON(prb_ies_len > 0 && !prb_ies))
		return ERR_PTR(-EINVAL);

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*info) + prb_ies_len +
	      sizeof(*tlv) + roundup(bcn->len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
	cmd->buf_len = __cpu_to_le32(bcn->len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* FIXME: prb_ies_len should probably be aligned to a 4 byte boundary
	 * but then it is impossible to pass the original ie len.
	 * This chunk is not used yet so if setting probe resp template yields
	 * problems with beaconing or crashes firmware look here.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
	info = (void *)tlv->value;
	info->caps = __cpu_to_le32(prb_caps);
	info->erp = __cpu_to_le32(prb_erp);
	memcpy(info->ies, prb_ies, prb_ies_len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);
	ptr += prb_ies_len;

	/* Beacon frame bytes, TLV length padded to 4 bytes. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
	memcpy(tlv->value, bcn->data, bcn->len);

	/* FIXME: Adjust TSF? */

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}

/* Build a probe response template command: cmd TLV, an empty probe info
 * TLV, then the probe response frame padded to a 4-byte boundary.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
			       struct sk_buff *prb)
{
	struct wmi_tlv_prb_tmpl_cmd *cmd;
	struct wmi_tlv_bcn_prb_info *info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*info) +
	      sizeof(*tlv) + roundup(prb->len, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->buf_len = __cpu_to_le32(prb->len);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* Probe info is sent with zeroed caps/erp. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
	tlv->len = __cpu_to_le16(sizeof(*info));
	info = (void *)tlv->value;
	info->caps = 0;
	info->erp = 0;

	ptr += sizeof(*tlv);
	ptr += sizeof(*info);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(prb->len, 4));
	memcpy(tlv->value, prb->data, prb->len);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
		   vdev_id);
	return skb;
}

/* Build a P2P GO beacon IE command. p2p_ie points at a raw IE, so
 * p2p_ie[1] is the IE body length and the full element is p2p_ie[1] + 2
 * bytes (id + len + body).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
				    const u8 *p2p_ie)
{
	struct wmi_tlv_p2p_go_bcn_ie *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv
= ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* The IE bytes are padded to a 4-byte TLV boundary; ie_len above
	 * preserves the unpadded length.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
	tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
	memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);

	ptr += sizeof(*tlv);
	ptr += roundup(p2p_ie[1] + 2, 4);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
		   vdev_id);
	return skb;
}

/* Build a TDLS set-state command for a vdev, filling in the driver's
 * fixed thresholds and timing parameters.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
					   enum wmi_tdls_state state)
{
	struct wmi_tdls_set_state_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;
	/* Set to options from wmi_tlv_tdls_options,
	 * for now none of them are enabled.
	 */
	u32 options = 0;

	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		options |= WMI_TLV_TDLS_BUFFER_STA_EN;

	/* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means firmware will handle
	 * the TDLS link inactivity detecting logic.
	 */
	if (state == WMI_TDLS_ENABLE_ACTIVE)
		state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));

	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->state = __cpu_to_le32(state);
	cmd->notification_interval_ms = __cpu_to_le32(5000);
	cmd->tx_discovery_threshold = __cpu_to_le32(100);
	cmd->tx_teardown_threshold = __cpu_to_le32(5);
	cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
	cmd->rssi_delta = __cpu_to_le32(-20);
	cmd->tdls_options = __cpu_to_le32(options);
	cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
	cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
	cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
	cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
	cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
		   state, vdev_id);
	return skb;
}

/* Translate WMM/uAPSD queue flags and service period into the firmware's
 * TDLS peer_qos bitmap.
 */
static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
{
	u32 peer_qos = 0;

	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
	if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
		peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;

	peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);

	return peer_qos;
}

/* Build a TDLS peer update command: peer state, capabilities and the
 * peer's channel list (one wmi_channel TLV per channel).
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
				       const struct wmi_tdls_peer_update_cmd_arg *arg,
				       const struct wmi_tdls_peer_capab_arg *cap,
				       const struct wmi_channel_arg *chan_arg)
{
	struct wmi_tdls_peer_update_cmd *cmd;
	struct wmi_tdls_peer_capab *peer_cap;
	struct wmi_channel *chan;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	u32 peer_qos;
	void *ptr;
	int len;
	int i;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) + sizeof(*peer_cap) +
	      sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));

	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
	cmd->peer_state = __cpu_to_le32(arg->peer_state);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
	tlv->len = __cpu_to_le16(sizeof(*peer_cap));
	peer_cap = (void *)tlv->value;
	peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
						   cap->peer_max_sp);
	peer_cap->peer_qos = __cpu_to_le32(peer_qos);
	peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
	peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
	peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
	peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
	peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
	peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);

	for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
		peer_cap->peer_operclass[i] = cap->peer_operclass[i];

	peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
	peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
	peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);

	ptr += sizeof(*tlv);
	ptr += sizeof(*peer_cap);

	/* Outer array TLV followed by one channel TLV per peer channel. */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));

	ptr += sizeof(*tlv);

	for (i = 0; i < cap->peer_chan_len; i++) {
		tlv = ptr;
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
		tlv->len = __cpu_to_le16(sizeof(*chan));
		chan = (void *)tlv->value;
		ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);

		ptr += sizeof(*tlv);
		ptr += sizeof(*chan);
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
		   arg->vdev_id, arg->peer_state, cap->peer_chan_len);
	return skb;
}

/* Build a pdev set-quiet-mode command (quiet period/duration/offset). */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
					  u32 duration, u32 next_offset,
					  u32 enabled)
{
	struct wmi_tlv_set_quiet_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (void *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	/* vdev_id is not in use, set to 0 */
	cmd->vdev_id = __cpu_to_le32(0);
	cmd->period = __cpu_to_le32(period);
	cmd->duration = __cpu_to_le32(duration);
	cmd->next_start = __cpu_to_le32(next_offset);
	cmd->enabled = __cpu_to_le32(enabled);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi tlv quiet param: period %u duration %u enabled %d\n",
		   period, duration, enabled);
	return skb;
}

static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
{
	struct wmi_tlv_wow_enable_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (struct wmi_tlv *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->enable = __cpu_to_le32(1);
	/* If the bus link cannot suspend, request that interface pausing
	 * be disabled in firmware.
	 */
	if (!ar->bus_param.link_can_suspend)
		cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
	return skb;
}

/* Add or remove (per 'enable') a WoW wakeup event for a vdev. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
					   u32 vdev_id,
					   enum wmi_wow_wakeup_event event,
					   u32 enable)
{
	struct wmi_tlv_wow_add_del_event_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (struct wmi_tlv *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->is_add = __cpu_to_le32(enable);
	cmd->event_bitmap = __cpu_to_le32(1 << event);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
		   wow_wakeup_event(event), enable, vdev_id);
	return skb;
}

/* Tell firmware the host is awake again after WoW sleep (no payload
 * fields).
 */
static struct sk_buff *
ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct wmi_tlv_wow_host_wakeup_ind *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (struct wmi_tlv *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
	return skb;
}

/* Build a WoW bitmap-pattern add command. The firmware interface requires
 * placeholder TLVs for pattern types not used here (ipv4/ipv6 sync, magic
 * packet, timeouts), which are sent with zero length.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
				      u32 pattern_id, const u8 *pattern,
				      const u8 *bitmask, int pattern_len,
				      int pattern_offset)
{
	struct wmi_tlv_wow_add_pattern_cmd *cmd;
	struct wmi_tlv_wow_bitmap_pattern *bitmap;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +			/* array struct */
	      sizeof(*tlv) + sizeof(*bitmap) +	/* bitmap */
	      sizeof(*tlv) +			/* empty ipv4 sync */
	      sizeof(*tlv) +			/* empty ipv6 sync */
	      sizeof(*tlv) +			/* empty magic */
	      sizeof(*tlv) +			/* empty info timeout */
	      sizeof(*tlv) + sizeof(u32);	/* ratelimit interval */

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* cmd */
	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->pattern_id = __cpu_to_le32(pattern_id);
	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* bitmap: outer array length covers the nested bitmap TLV header
	 * plus its payload.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));

	ptr += sizeof(*tlv);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
	tlv->len = __cpu_to_le16(sizeof(*bitmap));
	bitmap = (void *)tlv->value;

	memcpy(bitmap->patternbuf, pattern, pattern_len);
	memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
	bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
	bitmap->pattern_len = __cpu_to_le32(pattern_len);
	bitmap->bitmask_len = __cpu_to_le32(pattern_len);
	bitmap->pattern_id = __cpu_to_le32(pattern_id);

	ptr += sizeof(*tlv);
	ptr += sizeof(*bitmap);

	/* ipv4 sync */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ipv6 sync */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* magic */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* pattern info timeout */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* ratelimit interval; note the u32 value itself is never written
	 * here and so goes out zeroed.
	 */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(sizeof(u32));

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
		   vdev_id, pattern_id, pattern_offset);
	return skb;
}

/* Build a WoW bitmap-pattern delete command for the given pattern id. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
				      u32 pattern_id)
{
	struct wmi_tlv_wow_del_pattern_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	tlv = (struct wmi_tlv *)skb->data;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->pattern_id = __cpu_to_le32(pattern_id);
	cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
		   vdev_id, pattern_id);
	return skb;
}

/* Request FW to start PNO operation */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
				       u32 vdev_id,
				       struct wmi_pno_scan_req *pno)
{
	struct nlo_configured_parameters *nlo_list;
	struct wmi_tlv_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	__le32 *channel_list;
	u16 tlv_len;
	size_t len;
	void *ptr;
	u32 i;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */

	len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
				   WMI_NLO_MAX_CHAN);
	len += sizeof(struct nlo_configured_parameters) *
	       min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	/* wmi_tlv_wow_nlo_config_cmd parameters.
	 * NOTE(review): the command carries pno->vdev_id while the vdev_id
	 * argument is only used for the debug log below — confirm callers
	 * always pass matching values.
	 */
	cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);

	/* current FW does not support min-max range for dwell time */
	cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
	cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);

	if (pno->do_passive_scan)
		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);

	/* copy scan interval */
	cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
	cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
	cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
	cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);

	if (pno->enable_pno_scan_randomization) {
		cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
					    WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
		ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
		ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
	}

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list) */
	cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
					       WMI_NLO_MAX_SSIDS));
	tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
		  sizeof(struct nlo_configured_parameters);

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(tlv_len);

	ptr += sizeof(*tlv);
	nlo_list = ptr;
	for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
		/* Each array element carries its own inner TLV header. */
		tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
		tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
		tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
					 sizeof(*tlv));

		/* copy ssid and its length */
		nlo_list[i].ssid.valid = __cpu_to_le32(true);
		nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
		memcpy(nlo_list[i].ssid.ssid.ssid,
		       pno->a_networks[i].ssid.ssid,
		       __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));

		/* copy rssi threshold */
		if (pno->a_networks[i].rssi_threshold &&
		    pno->a_networks[i].rssi_threshold > -300) {
			nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
			nlo_list[i].rssi_cond.rssi =
				__cpu_to_le32(pno->a_networks[i].rssi_threshold);
		}

		nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
		nlo_list[i].bcast_nw_type.bcast_nw_type =
			__cpu_to_le32(pno->a_networks[i].bcast_nw_type);
	}

	ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);

	/* copy channel info; channels are taken from the first network */
	cmd->num_of_channels = __cpu_to_le32(min_t(u8,
						   pno->a_networks[0].channel_count,
						   WMI_NLO_MAX_CHAN));

	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
				 sizeof(u_int32_t));
	ptr += sizeof(*tlv);

	channel_list = (__le32 *)ptr;
	for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
		channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
		   vdev_id);

	return skb;
}

/* Request FW to stop ongoing PNO operation */
static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
							     u32 vdev_id)
{
	struct wmi_tlv_wow_nlo_config_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd) +
	      sizeof(*tlv) +
	      /* TLV place holder for array of structures
	       * nlo_configured_parameters(nlo_list)
	       */
	      sizeof(*tlv);
	      /* TLV place holder for array of uint32 channel_list */
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;

	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	/* nlo_configured_parameters(nlo_list), sent empty */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
	tlv->len = __cpu_to_le16(0);

	ptr += sizeof(*tlv);

	/* channel list
, sent empty */
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
	tlv->len = __cpu_to_le16(0);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
	return skb;
}

/* Dispatch a PNO config request to the start or stop builder. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
				 struct wmi_pno_scan_req *pno_scan)
{
	if (pno_scan->enable)
		return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
	else
		return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
}

/* Build an adaptive OCS (resource manager) enable/disable command. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct wmi_tlv_adaptive_qcs *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->enable = __cpu_to_le32(enable ? 1 : 0);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
	return skb;
}

/* Build an echo command carrying an arbitrary 32-bit value. */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
{
	struct wmi_echo_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->value = cpu_to_le32(value);

	ptr += sizeof(*tlv);
	ptr += sizeof(*cmd);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
	return skb;
}

/* Build a vdev spectral scan configure command from caller-supplied
 * scan parameters.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
					 const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_count = __cpu_to_le32(arg->scan_count);
	cmd->scan_period = __cpu_to_le32(arg->scan_period);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref =
__cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);

	return skb;
}

/* Build a vdev spectral scan enable command with trigger and enable
 * sub-commands.
 */
static struct sk_buff *
ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
					   u32 trigger, u32 enable)
{
	struct wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	size_t len;

	len = sizeof(*tlv) + sizeof(*cmd);
	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ptr = (void *)skb->data;
	tlv = ptr;
	tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
	tlv->len = __cpu_to_le16(sizeof(*cmd));
	cmd = (void *)tlv->value;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->trigger_cmd = __cpu_to_le32(trigger);
	cmd->enable_cmd = __cpu_to_le32(enable);

	return skb;
}

/****************/
/* TLV mappings */
/****************/

/* Map the driver's abstract WMI command ids onto the TLV firmware's
 * command id space; entries the TLV interface lacks are marked
 * WMI_CMD_UNSUPPORTED.
 */
static struct wmi_cmd_map wmi_tlv_cmd_map = {
	.init_cmdid = WMI_TLV_INIT_CMDID,
	.start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
	.stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
	.scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
	.scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
	.scan_prob_req_oui_cmdid =
WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,

	/* pdev commands */
	.pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
	.pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
	.pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
	.pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
	.pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
	.pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
	.pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
	.pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
	.pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
	.pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
	.pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
	.pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
	.pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,

	/* vdev commands */
	.vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
	.vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
	.vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
	.vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
	.vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
	.vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
	.vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
	.vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
	.vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,

	/* peer commands */
	.peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
	.peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
	.peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
	.peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
	.peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
	.peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
	.peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
	.peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,

	/* beacon / management frames */
	.bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
	.pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
	.bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
	.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
	.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
	.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
	.mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
	.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,

	/* block ack */
	.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
	.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
	.addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
	.delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
	.addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
	.send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,

	/* station powersave */
	.sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
	.sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
	.sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,

	/* DFS */
	.pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
	.pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,

	/* roaming / offload scan */
	.roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
	.roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
	.roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
	.roam_scan_rssi_change_threshold =
			WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
	.roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
	.ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
	.ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
	.ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,

	/* P2P */
	.p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
	.p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
	.p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
	.p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
	.p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,

	/* AP powersave */
	.ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
	.ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
	.peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,

	/* profiling */
	.wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
	.wlan_profile_set_hist_intvl_cmdid =
				WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
	.wlan_profile_get_profile_data_cmdid =
				WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
	.wlan_profile_enable_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
	.wlan_profile_list_profile_id_cmdid =
				WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,

	/* suspend/resume and WoW */
	.pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
	.pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
	.add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
	.rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
	.wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
	.wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
	.wow_enable_disable_wake_event_cmdid =
				WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
	.wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
	.wow_hostwakeup_from_sleep_cmdid =
				WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,

	/* RTT, spectral, stats, offloads */
	.rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
	.rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
	.vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
	.vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
	.request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
	.request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
	.set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
	.network_list_offload_config_cmdid =
				WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
	.gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
	.csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
	.csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
	.chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
	.peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
	.peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
	.sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
	.sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
	.sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,

	/* misc */
	.echo_cmdid = WMI_TLV_ECHO_CMDID,
	.pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
	.dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
	.pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
	.pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
	.vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
	.vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
	.force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
	.gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
	.gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
	.pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
	.vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
	.tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
	.tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
	.adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,

	/* commands the TLV firmware interface does not provide */
	.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
	.wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
	.rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
	.oem_req_cmdid = WMI_CMD_UNSUPPORTED,
	.nan_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
	.qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
	.tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
	.fwtest_cmdid = WMI_CMD_UNSUPPORTED,
	.vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
	.pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
};

/* Map the driver's abstract pdev parameter ids onto the TLV firmware's
 * parameter id space.
 */
static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
	.tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
	.rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
	.txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
	.txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
	.txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
	.beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
	.beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
	.resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
	.protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
	.dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
	.non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
	.agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
	.sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
	.ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
	.ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
	.ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
	.ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
	.ltr_ac_latency_vi =
WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI, 4361 .ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO, 4362 .ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT, 4363 .ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE, 4364 .ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE, 4365 .ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT, 4366 .l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE, 4367 .dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE, 4368 .pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH, 4369 .pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, 4370 .pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN, 4371 .pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE, 4372 .pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD, 4373 .vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD, 4374 .peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD, 4375 .bcnflt_stats_update_period = 4376 WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD, 4377 .pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS, 4378 .arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE, 4379 .dcs = WMI_TLV_PDEV_PARAM_DCS, 4380 .ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE, 4381 .ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD, 4382 .ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD, 4383 .ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL, 4384 .ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL, 4385 .dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN, 4386 .proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA, 4387 .idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG, 4388 .power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP, 4389 .fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED, 4390 .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR, 4391 .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE, 4392 .cal_period = WMI_PDEV_PARAM_UNSUPPORTED, 4393 .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED, 4394 
.rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED, 4395 .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED, 4396 .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED, 4397 .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED, 4398 .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED, 4399 .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED, 4400 .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED, 4401 .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED, 4402 .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED, 4403 .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, 4404 .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED, 4405 .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED, 4406 .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED, 4407 .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED, 4408 .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 4409 .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 4410 .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 4411 .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 4412 .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 4413 .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED, 4414 .en_stats = WMI_PDEV_PARAM_UNSUPPORTED, 4415 .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED, 4416 .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED, 4417 .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED, 4418 .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED, 4419 .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED, 4420 .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED, 4421 .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED, 4422 .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED, 4423 .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED, 4424 .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED, 4425 .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED, 4426 .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED, 4427 .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED, 4428 .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED, 4429 .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED, 4430 .rts_fixed_rate = 
WMI_PDEV_PARAM_UNSUPPORTED, 4431 .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED, 4432 .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, 4433 .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, 4434 .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, 4435 .rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG, 4436 .rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE, 4437 .peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE, 4438 }; 4439 4440 static struct wmi_peer_param_map wmi_tlv_peer_param_map = { 4441 .smps_state = WMI_TLV_PEER_SMPS_STATE, 4442 .ampdu = WMI_TLV_PEER_AMPDU, 4443 .authorize = WMI_TLV_PEER_AUTHORIZE, 4444 .chan_width = WMI_TLV_PEER_CHAN_WIDTH, 4445 .nss = WMI_TLV_PEER_NSS, 4446 .use_4addr = WMI_TLV_PEER_USE_4ADDR, 4447 .membership = WMI_TLV_PEER_MEMBERSHIP, 4448 .user_pos = WMI_TLV_PEER_USERPOS, 4449 .crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED, 4450 .tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR, 4451 .set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S, 4452 .ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH, 4453 .phymode = WMI_TLV_PEER_PHYMODE, 4454 .use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR, 4455 .dummy_var = WMI_TLV_PEER_DUMMY_VAR, 4456 }; 4457 4458 static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = { 4459 .rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD, 4460 .fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD, 4461 .beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL, 4462 .listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL, 4463 .multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE, 4464 .mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE, 4465 .slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME, 4466 .preamble = WMI_TLV_VDEV_PARAM_PREAMBLE, 4467 .swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME, 4468 .wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD, 4469 .wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME, 4470 .wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL, 4471 
.dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD, 4472 .wmi_vdev_oc_scheduler_air_time_limit = 4473 WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT, 4474 .wds = WMI_TLV_VDEV_PARAM_WDS, 4475 .atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW, 4476 .bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX, 4477 .bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT, 4478 .bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT, 4479 .feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM, 4480 .chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH, 4481 .chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET, 4482 .disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION, 4483 .sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT, 4484 .mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE, 4485 .protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE, 4486 .fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE, 4487 .sgi = WMI_TLV_VDEV_PARAM_SGI, 4488 .ldpc = WMI_TLV_VDEV_PARAM_LDPC, 4489 .tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC, 4490 .rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC, 4491 .intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD, 4492 .def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID, 4493 .nss = WMI_TLV_VDEV_PARAM_NSS, 4494 .bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE, 4495 .mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE, 4496 .mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE, 4497 .dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE, 4498 .unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE, 4499 .ap_keepalive_min_idle_inactive_time_secs = 4500 WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS, 4501 .ap_keepalive_max_idle_inactive_time_secs = 4502 WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS, 4503 .ap_keepalive_max_unresponsive_time_secs = 4504 WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS, 4505 .ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS, 4506 .mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED, 4507 .enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS, 4508 .txbf = 
WMI_TLV_VDEV_PARAM_TXBF, 4509 .packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE, 4510 .drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY, 4511 .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE, 4512 .ap_detect_out_of_sync_sleeping_sta_time_secs = 4513 WMI_TLV_VDEV_PARAM_UNSUPPORTED, 4514 .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED, 4515 .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED, 4516 .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED, 4517 .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED, 4518 .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED, 4519 .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, 4520 .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED, 4521 .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED, 4522 .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED, 4523 .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED, 4524 .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED, 4525 .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED, 4526 .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED, 4527 .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, 4528 .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, 4529 .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, 4530 }; 4531 4532 static const struct wmi_ops wmi_tlv_ops = { 4533 .rx = ath10k_wmi_tlv_op_rx, 4534 .map_svc = wmi_tlv_svc_map, 4535 .map_svc_ext = wmi_tlv_svc_map_ext, 4536 4537 .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev, 4538 .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev, 4539 .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev, 4540 .pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev, 4541 .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev, 4542 .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev, 4543 .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev, 4544 .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev, 4545 .pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr, 4546 .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, 4547 .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev, 4548 .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev, 
4549 .pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail, 4550 .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats, 4551 .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev, 4552 .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev, 4553 .pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev, 4554 .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme, 4555 4556 .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend, 4557 .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume, 4558 .gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd, 4559 .gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param, 4560 .gen_init = ath10k_wmi_tlv_op_gen_init, 4561 .gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan, 4562 .gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan, 4563 .gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create, 4564 .gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete, 4565 .gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start, 4566 .gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop, 4567 .gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up, 4568 .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down, 4569 .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param, 4570 .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key, 4571 .gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf, 4572 .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create, 4573 .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete, 4574 .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush, 4575 .gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param, 4576 .gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc, 4577 .gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode, 4578 .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps, 4579 .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps, 4580 .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list, 4581 .gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui, 4582 .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma, 4583 .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm, 4584 
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats, 4585 .gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info, 4586 .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang, 4587 /* .gen_mgmt_tx = not implemented; HTT is used */ 4588 .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send, 4589 .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send, 4590 .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg, 4591 .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable, 4592 .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable, 4593 .gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode, 4594 .gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature, 4595 /* .gen_addba_clear_resp not implemented */ 4596 /* .gen_addba_send not implemented */ 4597 /* .gen_addba_set_resp not implemented */ 4598 /* .gen_delba_send not implemented */ 4599 .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl, 4600 .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl, 4601 .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie, 4602 .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd, 4603 .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive, 4604 .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable, 4605 .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event, 4606 .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind, 4607 .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern, 4608 .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern, 4609 .gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno, 4610 .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state, 4611 .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update, 4612 .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs, 4613 .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, 4614 .get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype, 4615 .gen_echo = ath10k_wmi_tlv_op_gen_echo, 4616 
.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf, 4617 .gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable, 4618 /* .gen_gpio_config not implemented */ 4619 /* .gen_gpio_output not implemented */ 4620 }; 4621 4622 static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = { 4623 .auth = WMI_TLV_PEER_AUTH, 4624 .qos = WMI_TLV_PEER_QOS, 4625 .need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY, 4626 .need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY, 4627 .apsd = WMI_TLV_PEER_APSD, 4628 .ht = WMI_TLV_PEER_HT, 4629 .bw40 = WMI_TLV_PEER_40MHZ, 4630 .stbc = WMI_TLV_PEER_STBC, 4631 .ldbc = WMI_TLV_PEER_LDPC, 4632 .dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS, 4633 .static_mimops = WMI_TLV_PEER_STATIC_MIMOPS, 4634 .spatial_mux = WMI_TLV_PEER_SPATIAL_MUX, 4635 .vht = WMI_TLV_PEER_VHT, 4636 .bw80 = WMI_TLV_PEER_80MHZ, 4637 .pmf = WMI_TLV_PEER_PMF, 4638 .bw160 = WMI_TLV_PEER_160MHZ, 4639 }; 4640 4641 /************/ 4642 /* TLV init */ 4643 /************/ 4644 4645 void ath10k_wmi_tlv_attach(struct ath10k *ar) 4646 { 4647 ar->wmi.cmd = &wmi_tlv_cmd_map; 4648 ar->wmi.vdev_param = &wmi_tlv_vdev_param_map; 4649 ar->wmi.pdev_param = &wmi_tlv_pdev_param_map; 4650 ar->wmi.peer_param = &wmi_tlv_peer_param_map; 4651 ar->wmi.ops = &wmi_tlv_ops; 4652 ar->wmi.peer_flags = &wmi_tlv_peer_flags_map; 4653 } 4654