1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7 #include <net/mac80211.h>
8 #include <linux/netdevice.h>
9 #include <linux/dmi.h>
10
11 #include "iwl-trans.h"
12 #include "iwl-op-mode.h"
13 #include "fw/img.h"
14 #include "iwl-debug.h"
15 #include "iwl-prph.h"
16 #include "fw/acpi.h"
17 #include "fw/pnvm.h"
18 #include "fw/uefi.h"
19 #include "fw/regulatory.h"
20
21 #include "mvm.h"
22 #include "fw/dbg.h"
23 #include "iwl-phy-db.h"
24 #include "iwl-modparams.h"
25 #include "iwl-nvm-parse.h"
26 #include "time-sync.h"
27
28 #define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ)
29 #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
30
31 struct iwl_mvm_alive_data {
32 bool valid;
33 u32 scd_base_addr;
34 };
35
36 static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
37 {
38 struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
39 .valid = cpu_to_le32(valid_tx_ant),
40 };
41
42 IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
43 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
44 sizeof(tx_ant_cmd), &tx_ant_cmd);
45 }
46
47 static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
48 {
49 int i;
50 struct iwl_rss_config_cmd cmd = {
51 .flags = cpu_to_le32(IWL_RSS_ENABLE),
52 .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
53 BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
54 BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
55 BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
56 BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
57 BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
58 };
59
60 if (mvm->trans->num_rx_queues == 1)
61 return 0;
62
63 /* Do not direct RSS traffic to Q 0 which is our fallback queue */
64 for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
65 cmd.indirection_table[i] =
66 1 + (i % (mvm->trans->num_rx_queues - 1));
67 netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
68
69 return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
70 }
71
72 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
73 {
74 struct iwl_dqa_enable_cmd dqa_cmd = {
75 .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
76 };
77 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
78 int ret;
79
80 ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
81 if (ret)
82 IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
83 else
84 IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
85
86 return ret;
87 }
88
89 void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
90 struct iwl_rx_cmd_buffer *rxb)
91 {
92 struct iwl_rx_packet *pkt = rxb_addr(rxb);
93 struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
94
95 if (mfu_dump_notif->index_num == 0)
96 IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
97 le32_to_cpu(mfu_dump_notif->assert_id));
98 }
99
100 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
101 struct iwl_rx_packet *pkt, void *data)
102 {
103 unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
104 struct iwl_mvm *mvm =
105 container_of(notif_wait, struct iwl_mvm, notif_wait);
106 struct iwl_mvm_alive_data *alive_data = data;
107 struct iwl_umac_alive *umac;
108 struct iwl_lmac_alive *lmac1;
109 struct iwl_lmac_alive *lmac2 = NULL;
110 u16 status;
111 u32 lmac_error_event_table, umac_error_table;
112 u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
113 UCODE_ALIVE_NTFY, 0);
114 u32 i;
115
116
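/* Alive v6 additionally reports the IMR debug region: save its enable flag,
 * size and base address for the debug code, and mark any DRAM IMR dump
 * region as unsupported when IMR is disabled.
 */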
117 if (version == 6) {
118 struct iwl_alive_ntf_v6 *palive;
119
120 if (pkt_len < sizeof(*palive))
121 return false;
122
123 palive = (void *)pkt->data;
124 mvm->trans->dbg.imr_data.imr_enable =
125 le32_to_cpu(palive->imr.enabled);
126 mvm->trans->dbg.imr_data.imr_size =
127 le32_to_cpu(palive->imr.size);
128 mvm->trans->dbg.imr_data.imr2sram_remainbyte =
129 mvm->trans->dbg.imr_data.imr_size;
130 mvm->trans->dbg.imr_data.imr_base_addr =
131 palive->imr.base_addr;
132 mvm->trans->dbg.imr_data.imr_curr_addr =
133 le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
134 IWL_DEBUG_FW(mvm, "IMR Enabled: 0x0%x size 0x0%x Address 0x%016llx\n",
135 mvm->trans->dbg.imr_data.imr_enable,
136 mvm->trans->dbg.imr_data.imr_size,
137 le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));
138
139 if (!mvm->trans->dbg.imr_data.imr_enable) {
140 for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) {
141 struct iwl_ucode_tlv *reg_tlv;
142 struct iwl_fw_ini_region_tlv *reg;
143
144 reg_tlv = mvm->trans->dbg.active_regions[i];
145 if (!reg_tlv)
146 continue;
147
148 reg = (void *)reg_tlv->data;
149 /*
150 * We have only one DRAM IMR region, so we
151 * can break as soon as we find the first
152 * one.
153 */
154 if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
155 mvm->trans->dbg.unsupported_region_msk |= BIT(i);
156 break;
157 }
158 }
159 }
160 }
161
162 if (version >= 5) {
163 struct iwl_alive_ntf_v5 *palive;
164
165 if (pkt_len < sizeof(*palive))
166 return false;
167
168 palive = (void *)pkt->data;
169 umac = &palive->umac_data;
170 lmac1 = &palive->lmac_data[0];
171 lmac2 = &palive->lmac_data[1];
172 status = le16_to_cpu(palive->status);
173
174 mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
175 mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
176 mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
177
178 IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
179 mvm->trans->sku_id[0],
180 mvm->trans->sku_id[1],
181 mvm->trans->sku_id[2]);
182 } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) {
183 struct iwl_alive_ntf_v4 *palive;
184
185 if (pkt_len < sizeof(*palive))
186 return false;
187
188 palive = (void *)pkt->data;
189 umac = &palive->umac_data;
190 lmac1 = &palive->lmac_data[0];
191 lmac2 = &palive->lmac_data[1];
192 status = le16_to_cpu(palive->status);
193 } else if (iwl_rx_packet_payload_len(pkt) ==
194 sizeof(struct iwl_alive_ntf_v3)) {
195 struct iwl_alive_ntf_v3 *palive3;
196
197 if (pkt_len < sizeof(*palive3))
198 return false;
199
200 palive3 = (void *)pkt->data;
201 umac = &palive3->umac_data;
202 lmac1 = &palive3->lmac_data;
203 status = le16_to_cpu(palive3->status);
204 } else {
205 WARN(1, "unsupported alive notification (size %d)\n",
206 iwl_rx_packet_payload_len(pkt));
207 /* get timeout later */
208 return false;
209 }
210
211 lmac_error_event_table =
212 le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
213 iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);
214
215 if (lmac2)
216 mvm->trans->dbg.lmac_error_event_table[1] =
217 le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
218
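/* The UMAC error table address is reported with cache-control bits set;
 * mask them off (FW_ADDR_CACHE_CONTROL) to get the usable address.
 */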
219 umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
220 ~FW_ADDR_CACHE_CONTROL;
221
222 if (umac_error_table) {
223 if (umac_error_table >=
224 mvm->trans->cfg->min_umac_error_event_table) {
225 iwl_fw_umac_set_alive_err_table(mvm->trans,
226 umac_error_table);
227 } else {
228 IWL_ERR(mvm,
229 "Not valid error log pointer 0x%08X for %s uCode\n",
230 umac_error_table,
231 (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
232 "Init" : "RT");
233 }
234 }
235
236 alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
237 alive_data->valid = status == IWL_ALIVE_STATUS_OK;
238
239 IWL_DEBUG_FW(mvm,
240 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
241 status, lmac1->ver_type, lmac1->ver_subtype);
242
243 if (lmac2)
244 IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
245
246 IWL_DEBUG_FW(mvm,
247 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
248 le32_to_cpu(umac->umac_major),
249 le32_to_cpu(umac->umac_minor));
250
251 iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);
252
253 return true;
254 }
255
256 static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
257 struct iwl_rx_packet *pkt, void *data)
258 {
259 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
260
261 return true;
262 }
263
264 static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
265 struct iwl_rx_packet *pkt, void *data)
266 {
267 struct iwl_phy_db *phy_db = data;
268
269 if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
270 WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
271 return true;
272 }
273
274 WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
275
276 return false;
277 }
278
279 static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
280 {
281 #define IWL_FW_PRINT_REG_INFO(reg_name) \
282 IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))
283
284 struct iwl_trans *trans = mvm->trans;
285 enum iwl_device_family device_family = trans->trans_cfg->device_family;
286
287 if (device_family < IWL_DEVICE_FAMILY_8000)
288 return;
289
290 if (device_family <= IWL_DEVICE_FAMILY_9000)
291 IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION);
292 else
293 IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);
294
295 IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);
296
297 /* print OTP info */
298 IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
299 IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
300 }
301
302 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
303 enum iwl_ucode_type ucode_type)
304 {
305 struct iwl_notification_wait alive_wait;
306 struct iwl_mvm_alive_data alive_data = {};
307 const struct fw_img *fw;
308 int ret;
309 enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
310 static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
311 bool run_in_rfkill =
312 ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);
313 u8 count;
314 struct iwl_pc_data *pc_data;
315
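/* If a usniffer debug configuration was requested and the firmware does not
 * provide a unified usniffer image, load the dedicated usniffer variant of
 * the regular image instead.
 */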
316 if (ucode_type == IWL_UCODE_REGULAR &&
317 iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
318 !(fw_has_capa(&mvm->fw->ucode_capa,
319 IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
320 fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
321 else
322 fw = iwl_get_ucode_image(mvm->fw, ucode_type);
323 if (WARN_ON(!fw))
324 return -EINVAL;
325 iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
326 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
327
328 iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
329 alive_cmd, ARRAY_SIZE(alive_cmd),
330 iwl_alive_fn, &alive_data);
331
332 /*
333 * We want to load the INIT firmware even in RFKILL.
334 * For the unified firmware case, the ucode_type is not
335 * INIT, but we still need to run it.
336 */
337 ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
338 if (ret) {
339 iwl_fw_set_current_image(&mvm->fwrt, old_type);
340 iwl_remove_notification(&mvm->notif_wait, &alive_wait);
341 return ret;
342 }
343
344 /*
345 * Some things may run in the background now, but we
346 * just wait for the ALIVE notification here.
347 */
348 ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
349 MVM_UCODE_ALIVE_TIMEOUT);
350
351 if (mvm->trans->trans_cfg->device_family ==
352 IWL_DEVICE_FAMILY_AX210) {
353 /* print these registers regardless of alive fail/success */
354 IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n",
355 iwl_read_umac_prph(mvm->trans, WFPM_ARC1_PD_NOTIFICATION));
356 IWL_INFO(mvm, "WFPM_LMAC2_PD_NOTIFICATION: 0x%x\n",
357 iwl_read_umac_prph(mvm->trans, WFPM_LMAC2_PD_NOTIFICATION));
358 IWL_INFO(mvm, "WFPM_AUTH_KEY_0: 0x%x\n",
359 iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG));
360 IWL_INFO(mvm, "CNVI_SCU_SEQ_DATA_DW9: 0x%x\n",
361 iwl_read_prph(mvm->trans, CNVI_SCU_SEQ_DATA_DW9));
362 }
363
364 if (ret) {
365 struct iwl_trans *trans = mvm->trans;
366
367 /* SecBoot info */
368 if (trans->trans_cfg->device_family >=
369 IWL_DEVICE_FAMILY_22000) {
370 IWL_ERR(mvm,
371 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
372 iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
373 iwl_read_umac_prph(trans,
374 UMAG_SB_CPU_2_STATUS));
375 } else if (trans->trans_cfg->device_family >=
376 IWL_DEVICE_FAMILY_8000) {
377 IWL_ERR(mvm,
378 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
379 iwl_read_prph(trans, SB_CPU_1_STATUS),
380 iwl_read_prph(trans, SB_CPU_2_STATUS));
381 }
382
383 iwl_mvm_print_pd_notification(mvm);
384
385 /* LMAC/UMAC PC info */
386 if (trans->trans_cfg->device_family >=
387 IWL_DEVICE_FAMILY_22000) {
388 pc_data = trans->dbg.pc_data;
389 for (count = 0; count < trans->dbg.num_pc;
390 count++, pc_data++)
391 IWL_ERR(mvm, "%s: 0x%x\n",
392 pc_data->pc_name,
393 pc_data->pc_address);
394 } else if (trans->trans_cfg->device_family >=
395 IWL_DEVICE_FAMILY_9000) {
396 IWL_ERR(mvm, "UMAC PC: 0x%x\n",
397 iwl_read_umac_prph(trans,
398 UREG_UMAC_CURRENT_PC));
399 IWL_ERR(mvm, "LMAC PC: 0x%x\n",
400 iwl_read_umac_prph(trans,
401 UREG_LMAC1_CURRENT_PC));
402 if (iwl_mvm_is_cdb_supported(mvm))
403 IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
404 iwl_read_umac_prph(trans,
405 UREG_LMAC2_CURRENT_PC));
406 }
407
408 if (ret == -ETIMEDOUT && !mvm->fw_product_reset)
409 iwl_fw_dbg_error_collect(&mvm->fwrt,
410 FW_DBG_TRIGGER_ALIVE_TIMEOUT);
411
412 iwl_fw_set_current_image(&mvm->fwrt, old_type);
413 return ret;
414 }
415
416 if (!alive_data.valid) {
417 IWL_ERR(mvm, "Loaded ucode is not valid!\n");
418 iwl_fw_set_current_image(&mvm->fwrt, old_type);
419 return -EIO;
420 }
421
422 /* if we reached this point, the Alive notification was received */
423 iwl_mei_alive_notif(true);
424
425 ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
426 &mvm->fw->ucode_capa);
427 if (ret) {
428 IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
429 iwl_fw_set_current_image(&mvm->fwrt, old_type);
430 return ret;
431 }
432
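/* Tell the transport layer the firmware is alive and pass it the SCD
 * (scheduler) base address reported in the alive notification.
 */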
433 iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
434
435 /*
436 * Note: all the queues are enabled as part of the interface
437 * initialization, but in firmware restart scenarios they
438 * could be stopped, so wake them up. In firmware restart,
439 * mac80211 will have the queues stopped as well until the
440 * reconfiguration completes. During normal startup, they
441 * will be empty.
442 */
443
444 memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
445 /*
446 * Set a 'fake' TID for the command queue, since we use the
447 * hweight() of the tid_bitmap as a refcount now. Not that
448 * we ever even consider the command queue as one we might
449 * want to reuse, but be safe nevertheless.
450 */
451 mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
452 BIT(IWL_MAX_TID_COUNT + 2);
453
454 set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
455 #ifdef CONFIG_IWLWIFI_DEBUGFS
456 iwl_fw_set_dbg_rec_on(&mvm->fwrt);
457 #endif
458
459 /*
460 * For pre-MLD API (MLD API doesn't use the timestamps):
461 * All the BSSes in the BSS table include the GP2 in the system
462 * at the beacon Rx time; this is of course no longer relevant
463 * since we are resetting the firmware.
464 * Purge the whole BSS table.
465 */
466 if (!mvm->mld_api_is_used)
467 cfg80211_bss_flush(mvm->hw->wiphy);
468
469 return 0;
470 }
471
472 static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
473 struct iwl_phy_specific_cfg *phy_filters)
474 {
475 #ifdef CONFIG_ACPI
476 *phy_filters = mvm->phy_filters;
477 #endif /* CONFIG_ACPI */
478 }
479
480 static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
481 {
482 u8 cmd_ver;
483 int ret;
484 struct iwl_host_cmd cmd = {
485 .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
486 MCC_ALLOWED_AP_TYPE_CMD),
487 .flags = 0,
488 .data[0] = &mvm->fwrt.uats_table,
489 .len[0] = sizeof(mvm->fwrt.uats_table),
490 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
491 };
492
493 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
494 IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
495 return;
496 }
497
498 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
499 IWL_FW_CMD_VER_UNKNOWN);
500 if (cmd_ver != 1) {
501 IWL_DEBUG_RADIO(mvm,
502 "MCC_ALLOWED_AP_TYPE_CMD ver %d not supported\n",
503 cmd_ver);
504 return;
505 }
506
507 ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
508 if (ret < 0) {
509 IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret);
510 return;
511 }
512
513 ret = iwl_mvm_send_cmd(mvm, &cmd);
514 if (ret < 0)
515 IWL_ERR(mvm, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
516 ret);
517 else
518 IWL_DEBUG_RADIO(mvm, "MCC_ALLOWED_AP_TYPE_CMD sent to FW\n");
519 }
520
521 static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
522 {
523 u8 cmd_ver;
524 int ret;
525 struct iwl_host_cmd cmd = {
526 .id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
527 SAR_OFFSET_MAPPING_TABLE_CMD),
528 .flags = 0,
529 .data[0] = &mvm->fwrt.sgom_table,
530 .len[0] = sizeof(mvm->fwrt.sgom_table),
531 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
532 };
533
534 if (!mvm->fwrt.sgom_enabled) {
535 IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n");
536 return 0;
537 }
538
539 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
540 IWL_FW_CMD_VER_UNKNOWN);
541
542 if (cmd_ver != 2) {
543 IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n",
544 cmd_ver);
545 return 0;
546 }
547
548 ret = iwl_mvm_send_cmd(mvm, &cmd);
549 if (ret < 0)
550 IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);
551
552 return ret;
553 }
554
555 static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
556 {
557 u32 cmd_id = PHY_CONFIGURATION_CMD;
558 struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
559 enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
560 u8 cmd_ver;
561 size_t cmd_size;
562
563 if (iwl_mvm_has_unified_ucode(mvm) &&
564 !mvm->trans->cfg->tx_with_siso_diversity)
565 return 0;
566
567 if (mvm->trans->cfg->tx_with_siso_diversity) {
568 /*
569 * TODO: currently we don't set the antenna but let the NIC
570 * decide which antenna to use. This should come from BIOS.
571 */
572 phy_cfg_cmd.phy_cfg =
573 cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
574 }
575
576 /* Set parameters */
577 phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
578
579 /* set extra PHY configuration flags from the device's cfg */
580 phy_cfg_cmd.phy_cfg |=
581 cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);
582
583 phy_cfg_cmd.calib_control.event_trigger =
584 mvm->fw->default_calib[ucode_type].event_trigger;
585 phy_cfg_cmd.calib_control.flow_trigger =
586 mvm->fw->default_calib[ucode_type].flow_trigger;
587
588 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
589 IWL_FW_CMD_VER_UNKNOWN);
590 if (cmd_ver >= 3)
591 iwl_mvm_phy_filter_init(mvm, &phy_cfg_cmd.phy_specific_cfg);
592
593 IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
594 phy_cfg_cmd.phy_cfg);
595 cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
596 sizeof(struct iwl_phy_cfg_cmd_v1);
597 return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
598 }
599
600 static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
601 {
602 struct iwl_notification_wait init_wait;
603 struct iwl_nvm_access_complete_cmd nvm_complete = {};
604 struct iwl_init_extended_cfg_cmd init_cfg = {
605 .init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
606 };
607 static const u16 init_complete[] = {
608 INIT_COMPLETE_NOTIF,
609 };
610 u32 sb_cfg;
611 int ret;
612
613 if (mvm->trans->cfg->tx_with_siso_diversity)
614 init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
615
616 lockdep_assert_held(&mvm->mutex);
617
618 mvm->rfkill_safe_init_done = false;
619
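/* On AX210, a secure boot config that still resides in ROM means a product
 * reset flow will be needed; note it and request it via iwl_mei_pldr_req(),
 * bailing out with -EBUSY if that request fails.
 */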
620 if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
621 sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
622 /* if needed, we'll reset this on our way out later */
623 mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM;
624 if (mvm->fw_product_reset && iwl_mei_pldr_req())
625 return -EBUSY;
626 }
627
628 iwl_init_notification_wait(&mvm->notif_wait,
629 &init_wait,
630 init_complete,
631 ARRAY_SIZE(init_complete),
632 iwl_wait_init_complete,
633 NULL);
634
635 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
636
637 /* Will also start the device */
638 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
639 if (ret) {
640 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
641
642 /* if we needed reset then fail here, but notify and remove */
643 if (mvm->fw_product_reset) {
644 iwl_mei_alive_notif(false);
645 iwl_trans_pcie_remove(mvm->trans, true);
646 }
647
648 goto error;
649 }
650 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
651 NULL);
652
653 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
654 mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
655 CNVI_PMU_STEP_FLOW) &
656 CNVI_PMU_STEP_FLOW_FORCE_URM);
657
658 /* Send init config command to mark that we are sending NVM access
659 * commands
660 */
661 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
662 INIT_EXTENDED_CFG_CMD),
663 CMD_SEND_IN_RFKILL,
664 sizeof(init_cfg), &init_cfg);
665 if (ret) {
666 IWL_ERR(mvm, "Failed to run init config command: %d\n",
667 ret);
668 goto error;
669 }
670
671 /* Load NVM to NIC if needed */
672 if (mvm->nvm_file_name) {
673 ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
674 mvm->nvm_sections);
675 if (ret)
676 goto error;
677 ret = iwl_mvm_load_nvm_to_nic(mvm);
678 if (ret)
679 goto error;
680 }
681
682 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
683 NVM_ACCESS_COMPLETE),
684 CMD_SEND_IN_RFKILL,
685 sizeof(nvm_complete), &nvm_complete);
686 if (ret) {
687 IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
688 ret);
689 goto error;
690 }
691
692 ret = iwl_send_phy_cfg_cmd(mvm);
693 if (ret) {
694 IWL_ERR(mvm, "Failed to run PHY configuration: %d\n",
695 ret);
696 goto error;
697 }
698
699 /* We wait for the INIT complete notification */
700 ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
701 MVM_UCODE_ALIVE_TIMEOUT);
702 if (ret)
703 return ret;
704
705 /* Read the NVM only at driver load time, no need to do this twice */
706 if (!mvm->nvm_data) {
707 mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw,
708 mvm->set_tx_ant, mvm->set_rx_ant);
709 if (IS_ERR(mvm->nvm_data)) {
710 ret = PTR_ERR(mvm->nvm_data);
711 mvm->nvm_data = NULL;
712 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
713 return ret;
714 }
715 }
716
717 mvm->rfkill_safe_init_done = true;
718
719 return 0;
720
721 error:
722 iwl_remove_notification(&mvm->notif_wait, &init_wait);
723 return ret;
724 }
725
726 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
727 {
728 struct iwl_notification_wait calib_wait;
729 static const u16 init_complete[] = {
730 INIT_COMPLETE_NOTIF,
731 CALIB_RES_NOTIF_PHY_DB
732 };
733 int ret;
734
735 if (iwl_mvm_has_unified_ucode(mvm))
736 return iwl_run_unified_mvm_ucode(mvm);
737
738 lockdep_assert_held(&mvm->mutex);
739
740 mvm->rfkill_safe_init_done = false;
741
742 iwl_init_notification_wait(&mvm->notif_wait,
743 &calib_wait,
744 init_complete,
745 ARRAY_SIZE(init_complete),
746 iwl_wait_phy_db_entry,
747 mvm->phy_db);
748
749 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);
750
751 /* Will also start the device */
752 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
753 if (ret) {
754 IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
755 goto remove_notif;
756 }
757
758 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
759 ret = iwl_mvm_send_bt_init_conf(mvm);
760 if (ret)
761 goto remove_notif;
762 }
763
764 /* Read the NVM only at driver load time, no need to do this twice */
765 if (!mvm->nvm_data) {
766 ret = iwl_nvm_init(mvm);
767 if (ret) {
768 IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
769 goto remove_notif;
770 }
771 }
772
773 /* In case we read the NVM from external file, load it to the NIC */
774 if (mvm->nvm_file_name) {
775 ret = iwl_mvm_load_nvm_to_nic(mvm);
776 if (ret)
777 goto remove_notif;
778 }
779
780 WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
781 "Too old NVM version (0x%0x, required = 0x%0x)",
782 mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);
783
784 /*
785 * abort after reading the NVM in case RF kill is on; we will complete
786 * the init seq later when RF kill switches off
787 */
788 if (iwl_mvm_is_radio_hw_killed(mvm)) {
789 IWL_DEBUG_RF_KILL(mvm,
790 "jump over all phy activities due to RF kill\n");
791 goto remove_notif;
792 }
793
794 mvm->rfkill_safe_init_done = true;
795
796 /* Send TX valid antennas before triggering calibrations */
797 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
798 if (ret)
799 goto remove_notif;
800
801 ret = iwl_send_phy_cfg_cmd(mvm);
802 if (ret) {
803 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
804 ret);
805 goto remove_notif;
806 }
807
808 /*
809 * Some things may run in the background now, but we
810 * just wait for the calibration complete notification.
811 */
812 ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
813 MVM_UCODE_CALIB_TIMEOUT);
814 if (!ret)
815 goto out;
816
817 if (iwl_mvm_is_radio_hw_killed(mvm)) {
818 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
819 ret = 0;
820 } else {
821 IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
822 ret);
823 }
824
825 goto out;
826
827 remove_notif:
828 iwl_remove_notification(&mvm->notif_wait, &calib_wait);
829 out:
830 mvm->rfkill_safe_init_done = false;
831 if (!mvm->nvm_data) {
832 /* we want to debug INIT and we have no NVM - fake */
833 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
834 sizeof(struct ieee80211_channel) +
835 sizeof(struct ieee80211_rate),
836 GFP_KERNEL);
837 if (!mvm->nvm_data)
838 return -ENOMEM;
839 mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
840 mvm->nvm_data->bands[0].n_channels = 1;
841 mvm->nvm_data->bands[0].n_bitrates = 1;
842 mvm->nvm_data->bands[0].bitrates =
843 (void *)(mvm->nvm_data->channels + 1);
844 mvm->nvm_data->bands[0].bitrates->hw_value = 10;
845 }
846
847 return ret;
848 }
849
850 static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
851 {
852 struct iwl_ltr_config_cmd cmd = {
853 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
854 };
855
856 if (!mvm->trans->ltr_enabled)
857 return 0;
858
859 return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
860 sizeof(cmd), &cmd);
861 }
862
863 int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
864 {
865 u32 cmd_id = REDUCE_TX_POWER_CMD;
866 struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
867 .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
868 };
869 struct iwl_dev_tx_power_cmd cmd_v9_v10 = {
870 .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
871 };
872 __le16 *per_chain;
873 int ret;
874 u16 len = 0;
875 u32 n_subbands;
876 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
877 void *cmd_data = &cmd;
878
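/* Pick the command layout (length, number of sub-bands and per-chain table
 * pointer) according to the REDUCE_TX_POWER_CMD version the firmware
 * advertises.
 */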
879 if (cmd_ver == 10) {
880 len = sizeof(cmd_v9_v10.v10);
881 n_subbands = IWL_NUM_SUB_BANDS_V2;
882 per_chain = &cmd_v9_v10.v10.per_chain[0][0][0];
883 cmd_v9_v10.v10.flags =
884 cpu_to_le32(mvm->fwrt.reduced_power_flags);
885 } else if (cmd_ver == 9) {
886 len = sizeof(cmd_v9_v10.v9);
887 n_subbands = IWL_NUM_SUB_BANDS_V1;
888 per_chain = &cmd_v9_v10.v9.per_chain[0][0];
889 } else if (cmd_ver >= 7) {
890 len = sizeof(cmd.v7);
891 n_subbands = IWL_NUM_SUB_BANDS_V2;
892 per_chain = cmd.v7.per_chain[0][0];
893 cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
894 if (cmd_ver == 8)
895 len = sizeof(cmd.v8);
896 } else if (cmd_ver == 6) {
897 len = sizeof(cmd.v6);
898 n_subbands = IWL_NUM_SUB_BANDS_V2;
899 per_chain = cmd.v6.per_chain[0][0];
900 } else if (fw_has_api(&mvm->fw->ucode_capa,
901 IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
902 len = sizeof(cmd.v5);
903 n_subbands = IWL_NUM_SUB_BANDS_V1;
904 per_chain = cmd.v5.per_chain[0][0];
905 } else if (fw_has_capa(&mvm->fw->ucode_capa,
906 IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
907 len = sizeof(cmd.v4);
908 n_subbands = IWL_NUM_SUB_BANDS_V1;
909 per_chain = cmd.v4.per_chain[0][0];
910 } else {
911 len = sizeof(cmd.v3);
912 n_subbands = IWL_NUM_SUB_BANDS_V1;
913 per_chain = cmd.v3.per_chain[0][0];
914 }
915
916 /* all structs have the same common part, add its length */
917 len += sizeof(cmd.common);
918
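/* Versions below 9 also carry the per-band data in the same structure;
 * v9/v10 are sent from the separate cmd_v9_v10 structure instead.
 */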
919 if (cmd_ver < 9)
920 len += sizeof(cmd.per_band);
921 else
922 cmd_data = &cmd_v9_v10;
923
924 ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain,
925 IWL_NUM_CHAIN_TABLES,
926 n_subbands, prof_a, prof_b);
927
928 /* return on error or if the profile is disabled (positive number) */
929 if (ret)
930 return ret;
931
932 iwl_mei_set_power_limit(per_chain);
933
934 IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
935 return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data);
936 }
937
938 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
939 {
940 union iwl_geo_tx_power_profiles_cmd geo_tx_cmd;
941 struct iwl_geo_tx_power_profiles_resp *resp;
942 u16 len;
943 int ret;
944 struct iwl_host_cmd cmd = {
945 .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
946 .flags = CMD_WANT_SKB,
947 .data = { &geo_tx_cmd },
948 };
949 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
950 IWL_FW_CMD_VER_UNKNOWN);
951
952 /* the ops field is at the same spot for all versions, so set in v1 */
953 geo_tx_cmd.v1.ops =
954 cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
955
956 if (cmd_ver == 5)
957 len = sizeof(geo_tx_cmd.v5);
958 else if (cmd_ver == 4)
959 len = sizeof(geo_tx_cmd.v4);
960 else if (cmd_ver == 3)
961 len = sizeof(geo_tx_cmd.v3);
962 else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
963 IWL_UCODE_TLV_API_SAR_TABLE_VER))
964 len = sizeof(geo_tx_cmd.v2);
965 else
966 len = sizeof(geo_tx_cmd.v1);
967
968 if (!iwl_sar_geo_support(&mvm->fwrt))
969 return -EOPNOTSUPP;
970
971 cmd.len[0] = len;
972
973 ret = iwl_mvm_send_cmd(mvm, &cmd);
974 if (ret) {
975 IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
976 return ret;
977 }
978
979 resp = (void *)cmd.resp_pkt->data;
980 ret = le32_to_cpu(resp->profile_idx);
981
982 if (WARN_ON(ret > BIOS_GEO_MAX_PROFILE_NUM))
983 ret = -EIO;
984
985 iwl_free_resp(&cmd);
986 return ret;
987 }
988
989 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
990 {
991 u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD);
992 union iwl_geo_tx_power_profiles_cmd cmd;
993 u16 len;
994 u32 n_bands;
995 u32 n_profiles;
996 __le32 sk = cpu_to_le32(0);
997 int ret;
998 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
999 IWL_FW_CMD_VER_UNKNOWN);
1000
1001 BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
1002 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) ||
1003 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) !=
1004 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) ||
1005 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) !=
1006 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) ||
1007 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) !=
1008 offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops));
1009
1010 /* the ops field is at the same spot for all versions, so set in v1 */
1011 cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
1012
1013 /* Only set to South Korea if the table revision is 1 */
1014 if (mvm->fwrt.geo_rev == 1)
1015 sk = cpu_to_le32(1);
1016
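/* Choose the command size, number of bands and number of profiles based on
 * the PER_CHAIN_LIMIT_OFFSET_CMD version.
 */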
1017 if (cmd_ver == 5) {
1018 len = sizeof(cmd.v5);
1019 n_bands = ARRAY_SIZE(cmd.v5.table[0]);
1020 n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
1021 cmd.v5.table_revision = sk;
1022 } else if (cmd_ver == 4) {
1023 len = sizeof(cmd.v4);
1024 n_bands = ARRAY_SIZE(cmd.v4.table[0]);
1025 n_profiles = BIOS_GEO_MAX_PROFILE_NUM;
1026 cmd.v4.table_revision = sk;
1027 } else if (cmd_ver == 3) {
1028 len = sizeof(cmd.v3);
1029 n_bands = ARRAY_SIZE(cmd.v3.table[0]);
1030 n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
1031 cmd.v3.table_revision = sk;
1032 } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
1033 IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
1034 len = sizeof(cmd.v2);
1035 n_bands = ARRAY_SIZE(cmd.v2.table[0]);
1036 n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
1037 cmd.v2.table_revision = sk;
1038 } else {
1039 len = sizeof(cmd.v1);
1040 n_bands = ARRAY_SIZE(cmd.v1.table[0]);
1041 n_profiles = BIOS_GEO_MIN_PROFILE_NUM;
1042 }
1043
1044 BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
1045 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) ||
1046 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) !=
1047 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) ||
1048 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) !=
1049 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) ||
1050 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
1051 offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
1052 /* the table is at the same position for all versions, so use v1 */
1053 ret = iwl_sar_geo_fill_table(&mvm->fwrt, &cmd.v1.table[0][0],
1054 n_bands, n_profiles);
1055
1056 /*
1057 * It is a valid scenario to not support SAR, or to be missing the WGDS table,
1058 * but in that case there is no need to send the command.
1059 */
1060 if (ret)
1061 return 0;
1062
1063 return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
1064 }
1065
1066 int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
1067 {
1068 union iwl_ppag_table_cmd cmd;
1069 int ret, cmd_size;
1070
1071 ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
1072 /* Not supporting PPAG table is a valid scenario */
1073 if (ret < 0)
1074 return 0;
1075
1076 IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
1077 ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
1078 PER_PLATFORM_ANT_GAIN_CMD),
1079 0, cmd_size, &cmd);
1080 if (ret < 0)
1081 IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
1082 ret);
1083
1084 return ret;
1085 }
1086
1087 static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
1088 {
1089 /* no need to read the table, done in INIT stage */
1090 if (!(iwl_is_ppag_approved(&mvm->fwrt)))
1091 return 0;
1092
1093 return iwl_mvm_ppag_send_cmd(mvm);
1094 }
1095
1096 static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
1097 {
1098 int i;
1099 u32 size = le32_to_cpu(*le_size);
1100
1101 /* Verify that there is room for another country */
1102 if (size >= IWL_WTAS_BLACK_LIST_MAX)
1103 return false;
1104
1105 for (i = 0; i < size; i++) {
1106 if (list[i] == cpu_to_le32(mcc))
1107 return true;
1108 }
1109
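/* MCC not found in the list; append it and write back the updated size
 * (kept in little-endian).
 */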
1110 list[size++] = cpu_to_le32(mcc);
1111 *le_size = cpu_to_le32(size);
1112 return true;
1113 }
1114
1115 static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
1116 {
1117 u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
1118 int ret;
1119 struct iwl_tas_data data = {};
1120 struct iwl_tas_config_cmd cmd = {};
1121 int cmd_size, fw_ver;
1122
1123 BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
1124 IWL_WTAS_BLACK_LIST_MAX);
1125 BUILD_BUG_ON(ARRAY_SIZE(cmd.common.block_list_array) !=
1126 IWL_WTAS_BLACK_LIST_MAX);
1127
1128 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
1129 IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
1130 return;
1131 }
1132
1133 ret = iwl_bios_get_tas_table(&mvm->fwrt, &data);
1134 if (ret < 0) {
1135 IWL_DEBUG_RADIO(mvm,
1136 "TAS table invalid or unavailable. (%d)\n",
1137 ret);
1138 return;
1139 }
1140
1141 if (ret == 0)
1142 return;
1143
1144 if (!iwl_is_tas_approved()) {
1145 IWL_DEBUG_RADIO(mvm,
1146 "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
1147 dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
1148 if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array,
1149 &data.block_list_size,
1150 IWL_MCC_US)) ||
1151 (!iwl_mvm_add_to_tas_block_list(data.block_list_array,
1152 &data.block_list_size,
1153 IWL_MCC_CANADA))) {
1154 IWL_DEBUG_RADIO(mvm,
1155 "Unable to add US/Canada to TAS block list, disabling TAS\n");
1156 return;
1157 }
1158 } else {
1159 IWL_DEBUG_RADIO(mvm,
1160 "System vendor '%s' is in the approved list.\n",
1161 dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
1162 }
1163
1164 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
1165 IWL_FW_CMD_VER_UNKNOWN);
1166
1167 memcpy(&cmd.common, &data, sizeof(struct iwl_tas_config_cmd_common));
1168
1169 /* Set v3 or v4 specific parts; will be truncated for fw_ver < 3 */
1170 if (fw_ver == 4) {
1171 cmd.v4.override_tas_iec = data.override_tas_iec;
1172 cmd.v4.enable_tas_iec = data.enable_tas_iec;
1173 cmd.v4.usa_tas_uhb_allowed = data.usa_tas_uhb_allowed;
1174 } else {
1175 cmd.v3.override_tas_iec = cpu_to_le16(data.override_tas_iec);
1176 cmd.v3.enable_tas_iec = cpu_to_le16(data.enable_tas_iec);
1177 }
1178
1179 cmd_size = sizeof(struct iwl_tas_config_cmd_common);
1180 if (fw_ver >= 3)
1181 /* v4 is the same size as v3 */
1182 cmd_size += sizeof(struct iwl_tas_config_cmd_v3);
1183
1184 ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
1185 if (ret < 0)
1186 IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
1187 }
1188
1189 static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
1190 {
1191 u32 value = 0;
1192 /* default behaviour is disabled */
1193 bool bios_enable_rfi = false;
1194 int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value);
1195
1196
1197 if (ret < 0) {
1198 IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
1199 return bios_enable_rfi;
1200 }
1201
1202 value &= DSM_VALUE_RFI_DISABLE;
1203 /* RFI BIOS CONFIG value can be 0 or 3 only.
1204 * i.e. 0 means DDR and DLVR enabled, 3 means DDR and DLVR disabled.
1205 * 1 and 2 are invalid BIOS configurations, so it's not possible to
1206 * disable DDR/DLVR separately.
1207 */
1208 if (!value) {
1209 IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
1210 bios_enable_rfi = true;
1211 } else if (value == DSM_VALUE_RFI_DISABLE) {
1212 IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n");
1213 } else {
1214 IWL_DEBUG_RADIO(mvm,
1215 "DSM RFI got invalid value, value=%d\n", value);
1216 }
1217
1218 return bios_enable_rfi;
1219 }
1220
1221 static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
1222 {
1223 struct iwl_lari_config_change_cmd cmd;
1224 size_t cmd_size;
1225 int ret;
1226
1227 ret = iwl_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size);
1228 if (!ret) {
1229 ret = iwl_mvm_send_cmd_pdu(mvm,
1230 WIDE_ID(REGULATORY_AND_NVM_GROUP,
1231 LARI_CONFIG_CHANGE),
1232 0, cmd_size, &cmd);
1233 if (ret < 0)
1234 IWL_DEBUG_RADIO(mvm,
1235 "Failed to send LARI_CONFIG_CHANGE (%d)\n",
1236 ret);
1237 }
1238 }
1239
1240 void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
1241 {
1242 int ret;
1243
1244 iwl_acpi_get_guid_lock_status(&mvm->fwrt);
1245
1246 /* read PPAG table */
1247 ret = iwl_bios_get_ppag_table(&mvm->fwrt);
1248 if (ret < 0) {
1249 IWL_DEBUG_RADIO(mvm,
1250 "PPAG BIOS table invalid or unavailable. (%d)\n",
1251 ret);
1252 }
1253
1254 /* read SAR tables */
1255 ret = iwl_bios_get_wrds_table(&mvm->fwrt);
1256 if (ret < 0) {
1257 IWL_DEBUG_RADIO(mvm,
1258 "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
1259 ret);
1260 /*
1261 * If not available, don't fail and don't bother with EWRD and
1262 * WGDS */
1263
1264 if (!iwl_bios_get_wgds_table(&mvm->fwrt)) {
1265 /*
1266 * If basic SAR is not available, we check for WGDS,
1267 * which should *not* be available either. If it is
1268 * available, issue an error, because we can't use SAR
1269 * Geo without basic SAR.
1270 */
1271 IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
1272 }
1273
1274 } else {
1275 ret = iwl_bios_get_ewrd_table(&mvm->fwrt);
1276 /* if EWRD is not available, we can still use
1277 * WRDS, so don't fail */
1278 if (ret < 0)
1279 IWL_DEBUG_RADIO(mvm,
1280 "EWRD SAR BIOS table invalid or unavailable. (%d)\n",
1281 ret);
1282
1283 /* read geo SAR table */
1284 if (iwl_sar_geo_support(&mvm->fwrt)) {
1285 ret = iwl_bios_get_wgds_table(&mvm->fwrt);
1286 if (ret < 0)
1287 IWL_DEBUG_RADIO(mvm,
1288 "Geo SAR BIOS table invalid or unavailable. (%d)\n",
1289 ret);
1290 /* we don't fail if the table is not available */
1291 }
1292 }
1293
1294 iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters);
1295
1296 if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid))
1297 IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n");
1298 }
1299
1300 static void iwl_mvm_disconnect_iterator(void *data, u8 *mac,
1301 struct ieee80211_vif *vif)
1302 {
1303 if (vif->type == NL80211_IFTYPE_STATION)
1304 ieee80211_hw_restart_disconnect(vif);
1305 }
1306
1307 void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
1308 {
1309 u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
1310 u32 status = 0;
1311 int ret;
1312
1313 struct iwl_fw_error_recovery_cmd recovery_cmd = {
1314 .flags = cpu_to_le32(flags),
1315 .buf_size = 0,
1316 };
1317 struct iwl_host_cmd host_cmd = {
1318 .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
1319 .data = {&recovery_cmd, },
1320 .len = {sizeof(recovery_cmd), },
1321 };
1322
1323 /* no error log was defined in TLV */
1324 if (!error_log_size)
1325 return;
1326
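/* For ERROR_RECOVERY_UPDATE_DB, attach the error-log blob saved across the
 * HW reset as a second, no-copy fragment of the command.
 */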
1327 if (flags & ERROR_RECOVERY_UPDATE_DB) {
1328 /* no buffer was allocated during the HW reset */
1329 if (!mvm->error_recovery_buf)
1330 return;
1331
1332 host_cmd.data[1] = mvm->error_recovery_buf;
1333 host_cmd.len[1] = error_log_size;
1334 host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
1335 recovery_cmd.buf_size = cpu_to_le32(error_log_size);
1336 }
1337
1338 ret = iwl_mvm_send_cmd_status(mvm, &host_cmd, &status);
1339 kfree(mvm->error_recovery_buf);
1340 mvm->error_recovery_buf = NULL;
1341
1342 if (ret) {
1343 IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
1344 return;
1345 }
1346
1347 /* the response (skb) is only relevant for ERROR_RECOVERY_UPDATE_DB */
1348 if (flags & ERROR_RECOVERY_UPDATE_DB) {
1349 if (status) {
1350 IWL_ERR(mvm,
1351 "Failed to send recovery cmd blob was invalid %d\n",
1352 status);
1353
1354 ieee80211_iterate_interfaces(mvm->hw, 0,
1355 iwl_mvm_disconnect_iterator,
1356 mvm);
1357 }
1358 }
1359 }
1360
1361 static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
1362 {
1363 return iwl_mvm_sar_select_profile(mvm, 1, 1);
1364 }
1365
1366 static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
1367 {
1368 int ret;
1369
1370 if (iwl_mvm_has_unified_ucode(mvm))
1371 return iwl_run_unified_mvm_ucode(mvm);
1372
1373 ret = iwl_run_init_mvm_ucode(mvm);
1374
1375 if (ret) {
1376 IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
1377 return ret;
1378 }
1379
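/* The INIT image has completed; stop debug recording and the device, then
 * bring the HW back up so the runtime (REGULAR) image can be loaded.
 */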
1380 iwl_fw_dbg_stop_sync(&mvm->fwrt);
1381 iwl_trans_stop_device(mvm->trans);
1382 ret = iwl_trans_start_hw(mvm->trans);
1383 if (ret)
1384 return ret;
1385
1386 mvm->rfkill_safe_init_done = false;
1387 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
1388 if (ret)
1389 return ret;
1390
1391 mvm->rfkill_safe_init_done = true;
1392
1393 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
1394 NULL);
1395
1396 return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
1397 }
1398
1399 int iwl_mvm_up(struct iwl_mvm *mvm)
1400 {
1401 int ret, i;
1402 struct ieee80211_supported_band *sband = NULL;
1403
1404 lockdep_assert_wiphy(mvm->hw->wiphy);
1405 lockdep_assert_held(&mvm->mutex);
1406
1407 ret = iwl_trans_start_hw(mvm->trans);
1408 if (ret)
1409 return ret;
1410
1411 ret = iwl_mvm_load_rt_fw(mvm);
1412 if (ret) {
1413 IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
1414 if (ret != -ERFKILL && !mvm->fw_product_reset)
1415 iwl_fw_dbg_error_collect(&mvm->fwrt,
1416 FW_DBG_TRIGGER_DRIVER);
1417 goto error;
1418 }
1419
1420 /* FW loaded successfully */
1421 mvm->fw_product_reset = false;
1422
1423 iwl_fw_disable_dbg_asserts(&mvm->fwrt);
1424 iwl_get_shared_mem_conf(&mvm->fwrt);
1425
1426 ret = iwl_mvm_sf_update(mvm, NULL, false);
1427 if (ret)
1428 IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
1429
1430 if (!iwl_trans_dbg_ini_valid(mvm->trans)) {
1431 mvm->fwrt.dump.conf = FW_DBG_INVALID;
1432 /* if we have a destination, assume EARLY START */
1433 if (mvm->fw->dbg.dest_tlv)
1434 mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
1435 iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
1436 }
1437
1438 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1439 if (ret)
1440 goto error;
1441
1442 if (!iwl_mvm_has_unified_ucode(mvm)) {
1443 /* Send phy db control command and then phy db calibration */
1444 ret = iwl_send_phy_db_data(mvm->phy_db);
1445 if (ret)
1446 goto error;
1447 ret = iwl_send_phy_cfg_cmd(mvm);
1448 if (ret)
1449 goto error;
1450 }
1451
1452 ret = iwl_mvm_send_bt_init_conf(mvm);
1453 if (ret)
1454 goto error;
1455
1456 if (fw_has_capa(&mvm->fw->ucode_capa,
1457 IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
1458 ret = iwl_set_soc_latency(&mvm->fwrt);
1459 if (ret)
1460 goto error;
1461 }
1462
1463 iwl_mvm_lari_cfg(mvm);
1464
1465 /* Init RSS configuration */
1466 ret = iwl_configure_rxq(&mvm->fwrt);
1467 if (ret)
1468 goto error;
1469
1470 if (iwl_mvm_has_new_rx_api(mvm)) {
1471 ret = iwl_send_rss_cfg_cmd(mvm);
1472 if (ret) {
1473 IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
1474 ret);
1475 goto error;
1476 }
1477 }
1478
1479 /* init the fw <-> mac80211 STA mapping */
1480 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
1481 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1482 RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL);
1483 }
1484
1485 for (i = 0; i < IWL_FW_MAX_LINK_ID + 1; i++)
1486 RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL);
1487
1488 mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;
1489
1490 /* reset quota debouncing buffer - 0xff will yield invalid data */
1491 memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
1492
1493 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) {
1494 ret = iwl_mvm_send_dqa_cmd(mvm);
1495 if (ret)
1496 goto error;
1497 }
1498
1499 /*
1500 * Add auxiliary station for scanning.
1501 * Newer versions of this command imply that the fw uses
1502 * an internal aux station for all aux activities that don't
1503 * require a dedicated data queue.
1504 */
1505 if (!iwl_mvm_has_new_station_api(mvm->fw)) {
1506 /*
1507 * In old versions the aux station uses a mac id like other
1508 * stations, and not an lmac id
1509 */
1510 ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
1511 if (ret)
1512 goto error;
1513 }
1514
1515 /* Add all the PHY contexts */
1516 i = 0;
1517 while (!sband && i < NUM_NL80211_BANDS)
1518 sband = mvm->hw->wiphy->bands[i++];
1519
1520 if (WARN_ON_ONCE(!sband)) {
1521 ret = -ENODEV;
1522 goto error;
1523 }
1524
1525 if (iwl_mvm_is_tt_in_fw(mvm)) {
1526 /* in order to give the responsibility of ct-kill and
1527 * TX backoff to FW we need to send an empty temperature reporting
1528 * cmd during init time
1529 */
1530 iwl_mvm_send_temp_report_ths_cmd(mvm);
1531 } else {
1532 /* Initialize tx backoffs to the minimal possible */
1533 iwl_mvm_tt_tx_backoff(mvm, 0);
1534 }
1535
1536 #ifdef CONFIG_THERMAL
1537 /* TODO: read the budget from BIOS / Platform NVM */
1538
1539 /*
1540 * In case there is no budget from BIOS / Platform NVM the default
1541 * budget should be 2000mW (cooling state 0).
1542 */
1543 if (iwl_mvm_is_ctdp_supported(mvm)) {
1544 ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
1545 mvm->cooling_dev.cur_state);
1546 if (ret)
1547 goto error;
1548 }
1549 #endif
1550
1551 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2))
1552 WARN_ON(iwl_mvm_config_ltr(mvm));
1553
1554 ret = iwl_mvm_power_update_device(mvm);
1555 if (ret)
1556 goto error;
1557
1558 /*
1559 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
1560 * anyway, so don't init MCC.
1561 */
1562 if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
1563 ret = iwl_mvm_init_mcc(mvm);
1564 if (ret)
1565 goto error;
1566 }
1567
1568 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1569 mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
1570 mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
1571 ret = iwl_mvm_config_scan(mvm);
1572 if (ret)
1573 goto error;
1574 }
1575
1576 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1577 iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);
1578
1579 if (mvm->time_sync.active)
1580 iwl_mvm_time_sync_config(mvm, mvm->time_sync.peer_addr,
1581 IWL_TIME_SYNC_PROTOCOL_TM |
1582 IWL_TIME_SYNC_PROTOCOL_FTM);
1583 }
1584
1585 if (!mvm->ptp_data.ptp_clock)
1586 iwl_mvm_ptp_init(mvm);
1587
1588 ret = iwl_mvm_ppag_init(mvm);
1589 if (ret)
1590 goto error;
1591
1592 ret = iwl_mvm_sar_init(mvm);
1593 if (ret == 0)
1594 ret = iwl_mvm_sar_geo_init(mvm);
1595 if (ret < 0)
1596 goto error;
1597
1598 ret = iwl_mvm_sgom_init(mvm);
1599 if (ret)
1600 goto error;
1601
1602 iwl_mvm_tas_init(mvm);
1603 iwl_mvm_leds_sync(mvm);
1604 iwl_mvm_uats_init(mvm);
1605
1606 if (iwl_rfi_supported(mvm)) {
1607 if (iwl_mvm_eval_dsm_rfi(mvm))
1608 iwl_rfi_send_config_cmd(mvm, NULL);
1609 }
1610
1611 iwl_mvm_mei_device_state(mvm, true);
1612
1613 IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
1614 return 0;
1615 error:
1616 iwl_mvm_stop_device(mvm);
1617 return ret;
1618 }
1619
1620 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
1621 {
1622 int ret, i;
1623
1624 lockdep_assert_wiphy(mvm->hw->wiphy);
1625 lockdep_assert_held(&mvm->mutex);
1626
1627 ret = iwl_trans_start_hw(mvm->trans);
1628 if (ret)
1629 return ret;
1630
1631 ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
1632 if (ret) {
1633 IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
1634 goto error;
1635 }
1636
1637 ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1638 if (ret)
1639 goto error;
1640
1641 /* Send phy db control command and then phy db calibration */
1642 ret = iwl_send_phy_db_data(mvm->phy_db);
1643 if (ret)
1644 goto error;
1645
1646 ret = iwl_send_phy_cfg_cmd(mvm);
1647 if (ret)
1648 goto error;
1649
1650 /* init the fw <-> mac80211 STA mapping */
1651 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
1652 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1653 RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL);
1654 }
1655
1656 if (!iwl_mvm_has_new_station_api(mvm->fw)) {
1657 /*
1658 * Add auxiliary station for scanning.
1659 * Newer versions of this command imply that the fw uses
1660 * an internal aux station for all aux activities that don't
1661 * require a dedicated data queue.
1662 * In old versions the aux station uses a mac id like other
1663 * stations, and not an lmac id
1664 */
1665 ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
1666 if (ret)
1667 goto error;
1668 }
1669
1670 return 0;
1671 error:
1672 iwl_mvm_stop_device(mvm);
1673 return ret;
1674 }
1675
1676 void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1677 struct iwl_rx_cmd_buffer *rxb)
1678 {
1679 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1680 struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
1681
1682 IWL_DEBUG_INFO(mvm,
1683 "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
1684 le32_to_cpu(mfuart_notif->installed_ver),
1685 le32_to_cpu(mfuart_notif->external_ver),
1686 le32_to_cpu(mfuart_notif->status),
1687 le32_to_cpu(mfuart_notif->duration));
1688
1689 if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
1690 IWL_DEBUG_INFO(mvm,
1691 "MFUART: image size: 0x%08x\n",
1692 le32_to_cpu(mfuart_notif->image_size));
1693 }
1694