xref: /linux/drivers/net/wireless/ath/ath10k/qmi.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (c) 2018 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/completion.h>
8 #include <linux/device.h>
9 #include <linux/debugfs.h>
10 #include <linux/idr.h>
11 #include <linux/kernel.h>
12 #include <linux/of.h>
13 #include <linux/of_address.h>
14 #include <linux/module.h>
15 #include <linux/net.h>
16 #include <linux/platform_device.h>
17 #include <linux/firmware/qcom/qcom_scm.h>
18 #include <linux/soc/qcom/smem.h>
19 #include <linux/string.h>
20 #include <net/sock.h>
21 
22 #include "debug.h"
23 #include "snoc.h"
24 
25 #define ATH10K_QMI_CLIENT_ID		0x4b4e454c
26 #define ATH10K_QMI_TIMEOUT		30
27 #define SMEM_IMAGE_VERSION_TABLE       469
28 #define SMEM_IMAGE_TABLE_CNSS_INDEX     13
29 #define SMEM_IMAGE_VERSION_ENTRY_SIZE	128
30 #define SMEM_IMAGE_VERSION_NAME_SIZE	75
31 
32 static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
33 					 struct ath10k_msa_mem_info *mem_info)
34 {
35 	struct qcom_scm_vmperm dst_perms[3];
36 	struct ath10k *ar = qmi->ar;
37 	u64 src_perms;
38 	u32 perm_count;
39 	int ret;
40 
41 	src_perms = BIT(QCOM_SCM_VMID_HLOS);
42 
43 	dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
44 	dst_perms[0].perm = QCOM_SCM_PERM_RW;
45 	dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
46 	dst_perms[1].perm = QCOM_SCM_PERM_RW;
47 
48 	if (mem_info->secure) {
49 		perm_count = 2;
50 	} else {
51 		dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
52 		dst_perms[2].perm = QCOM_SCM_PERM_RW;
53 		perm_count = 3;
54 	}
55 
56 	ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
57 				  &src_perms, dst_perms, perm_count);
58 	if (ret < 0)
59 		ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);
60 
61 	return ret;
62 }
63 
64 static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
65 					   struct ath10k_msa_mem_info *mem_info)
66 {
67 	struct qcom_scm_vmperm dst_perms;
68 	struct ath10k *ar = qmi->ar;
69 	u64 src_perms;
70 	int ret;
71 
72 	src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);
73 
74 	if (!mem_info->secure)
75 		src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);
76 
77 	dst_perms.vmid = QCOM_SCM_VMID_HLOS;
78 	dst_perms.perm = QCOM_SCM_PERM_RW;
79 
80 	ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
81 				  &src_perms, &dst_perms, 1);
82 	if (ret < 0)
83 		ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);
84 
85 	return ret;
86 }
87 
88 static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
89 {
90 	int ret;
91 	int i;
92 
93 	if (qmi->msa_fixed_perm)
94 		return 0;
95 
96 	for (i = 0; i < qmi->nr_mem_region; i++) {
97 		ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
98 		if (ret)
99 			goto err_unmap;
100 	}
101 
102 	return 0;
103 
104 err_unmap:
105 	for (i--; i >= 0; i--)
106 		ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
107 	return ret;
108 }
109 
110 static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
111 {
112 	int i;
113 
114 	if (qmi->msa_fixed_perm)
115 		return;
116 
117 	for (i = 0; i < qmi->nr_mem_region; i++)
118 		ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
119 }
120 
121 static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
122 {
123 	struct wlfw_msa_info_resp_msg_v01 resp = {};
124 	struct wlfw_msa_info_req_msg_v01 req = {};
125 	struct ath10k *ar = qmi->ar;
126 	phys_addr_t max_mapped_addr;
127 	struct qmi_txn txn;
128 	int ret;
129 	int i;
130 
131 	req.msa_addr = ar->msa.paddr;
132 	req.size = ar->msa.mem_size;
133 
134 	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
135 			   wlfw_msa_info_resp_msg_v01_ei, &resp);
136 	if (ret < 0)
137 		goto out;
138 
139 	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
140 			       QMI_WLFW_MSA_INFO_REQ_V01,
141 			       WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
142 			       wlfw_msa_info_req_msg_v01_ei, &req);
143 	if (ret < 0) {
144 		qmi_txn_cancel(&txn);
145 		ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
146 		goto out;
147 	}
148 
149 	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
150 	if (ret < 0)
151 		goto out;
152 
153 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
154 		ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
155 		ret = -EINVAL;
156 		goto out;
157 	}
158 
159 	if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
160 		ath10k_err(ar, "invalid memory region length received: %d\n",
161 			   resp.mem_region_info_len);
162 		ret = -EINVAL;
163 		goto out;
164 	}
165 
166 	max_mapped_addr = ar->msa.paddr + ar->msa.mem_size;
167 	qmi->nr_mem_region = resp.mem_region_info_len;
168 	for (i = 0; i < resp.mem_region_info_len; i++) {
169 		if (resp.mem_region_info[i].size > ar->msa.mem_size ||
170 		    resp.mem_region_info[i].region_addr > max_mapped_addr ||
171 		    resp.mem_region_info[i].region_addr < ar->msa.paddr ||
172 		    resp.mem_region_info[i].size +
173 		    resp.mem_region_info[i].region_addr > max_mapped_addr) {
174 			ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
175 				   resp.mem_region_info[i].region_addr,
176 				   resp.mem_region_info[i].size);
177 			ret = -EINVAL;
178 			goto fail_unwind;
179 		}
180 		qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
181 		qmi->mem_region[i].size = resp.mem_region_info[i].size;
182 		qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
183 		ath10k_dbg(ar, ATH10K_DBG_QMI,
184 			   "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
185 			   i, &qmi->mem_region[i].addr,
186 			   qmi->mem_region[i].size,
187 			   qmi->mem_region[i].secure);
188 	}
189 
190 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
191 	return 0;
192 
193 fail_unwind:
194 	memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i);
195 out:
196 	return ret;
197 }
198 
199 static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
200 {
201 	struct wlfw_msa_ready_resp_msg_v01 resp = {};
202 	struct wlfw_msa_ready_req_msg_v01 req = {};
203 	struct ath10k *ar = qmi->ar;
204 	struct qmi_txn txn;
205 	int ret;
206 
207 	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
208 			   wlfw_msa_ready_resp_msg_v01_ei, &resp);
209 	if (ret < 0)
210 		goto out;
211 
212 	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
213 			       QMI_WLFW_MSA_READY_REQ_V01,
214 			       WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
215 			       wlfw_msa_ready_req_msg_v01_ei, &req);
216 	if (ret < 0) {
217 		qmi_txn_cancel(&txn);
218 		ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
219 		goto out;
220 	}
221 
222 	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
223 	if (ret < 0)
224 		goto out;
225 
226 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
227 		ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
228 		ret = -EINVAL;
229 	}
230 
231 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
232 	return 0;
233 
234 out:
235 	return ret;
236 }
237 
238 static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
239 {
240 	struct wlfw_bdf_download_resp_msg_v01 resp = {};
241 	struct wlfw_bdf_download_req_msg_v01 *req;
242 	struct ath10k *ar = qmi->ar;
243 	unsigned int remaining;
244 	struct qmi_txn txn;
245 	const u8 *temp;
246 	int ret;
247 
248 	req = kzalloc(sizeof(*req), GFP_KERNEL);
249 	if (!req)
250 		return -ENOMEM;
251 
252 	temp = ar->normal_mode_fw.board_data;
253 	remaining = ar->normal_mode_fw.board_len;
254 
255 	while (remaining) {
256 		req->valid = 1;
257 		req->file_id_valid = 1;
258 		req->file_id = 0;
259 		req->total_size_valid = 1;
260 		req->total_size = ar->normal_mode_fw.board_len;
261 		req->seg_id_valid = 1;
262 		req->data_valid = 1;
263 		req->end_valid = 1;
264 
265 		if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
266 			req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
267 		} else {
268 			req->data_len = remaining;
269 			req->end = 1;
270 		}
271 
272 		memcpy(req->data, temp, req->data_len);
273 
274 		ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
275 				   wlfw_bdf_download_resp_msg_v01_ei,
276 				   &resp);
277 		if (ret < 0)
278 			goto out;
279 
280 		ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
281 				       QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
282 				       WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
283 				       wlfw_bdf_download_req_msg_v01_ei, req);
284 		if (ret < 0) {
285 			qmi_txn_cancel(&txn);
286 			goto out;
287 		}
288 
289 		ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
290 
291 		if (ret < 0)
292 			goto out;
293 
294 		/* end = 1 triggers a CRC check on the BDF.  If this fails, we
295 		 * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
296 		 * willing to use the BDF.  For some platforms, all the valid
297 		 * released BDFs fail this CRC check, so attempt to detect this
298 		 * scenario and treat it as non-fatal.
299 		 */
300 		if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
301 		    !(req->end == 1 &&
302 		      resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
303 			ath10k_err(ar, "failed to download board data file: %d\n",
304 				   resp.resp.error);
305 			ret = -EINVAL;
306 			goto out;
307 		}
308 
309 		remaining -= req->data_len;
310 		temp += req->data_len;
311 		req->seg_id++;
312 	}
313 
314 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");
315 
316 	kfree(req);
317 	return 0;
318 
319 out:
320 	kfree(req);
321 	return ret;
322 }
323 
324 static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
325 {
326 	struct wlfw_cal_report_resp_msg_v01 resp = {};
327 	struct wlfw_cal_report_req_msg_v01 req = {};
328 	struct ath10k *ar = qmi->ar;
329 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
330 	struct qmi_txn txn;
331 	int i, j = 0;
332 	int ret;
333 
334 	if (ar_snoc->xo_cal_supported) {
335 		req.xo_cal_data_valid = 1;
336 		req.xo_cal_data = ar_snoc->xo_cal_data;
337 	}
338 
339 	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
340 			   &resp);
341 	if (ret < 0)
342 		goto out;
343 
344 	for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
345 		if (qmi->cal_data[i].total_size &&
346 		    qmi->cal_data[i].data) {
347 			req.meta_data[j] = qmi->cal_data[i].cal_id;
348 			j++;
349 		}
350 	}
351 	req.meta_data_len = j;
352 
353 	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
354 			       QMI_WLFW_CAL_REPORT_REQ_V01,
355 			       WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
356 			       wlfw_cal_report_req_msg_v01_ei, &req);
357 	if (ret < 0) {
358 		qmi_txn_cancel(&txn);
359 		ath10k_err(ar, "failed to send calibration request: %d\n", ret);
360 		goto out;
361 	}
362 
363 	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
364 	if (ret < 0)
365 		goto out;
366 
367 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
368 		ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
369 		ret = -EINVAL;
370 		goto out;
371 	}
372 
373 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
374 	return 0;
375 
376 out:
377 	return ret;
378 }
379 
380 static int
381 ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
382 {
383 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
384 	struct ath10k_qmi *qmi = ar_snoc->qmi;
385 	struct wlfw_wlan_mode_resp_msg_v01 resp = {};
386 	struct wlfw_wlan_mode_req_msg_v01 req = {};
387 	struct qmi_txn txn;
388 	int ret;
389 
390 	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
391 			   wlfw_wlan_mode_resp_msg_v01_ei,
392 			   &resp);
393 	if (ret < 0)
394 		goto out;
395 
396 	req.mode = mode;
397 	req.hw_debug_valid = 1;
398 	req.hw_debug = 0;
399 
400 	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
401 			       QMI_WLFW_WLAN_MODE_REQ_V01,
402 			       WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
403 			       wlfw_wlan_mode_req_msg_v01_ei, &req);
404 	if (ret < 0) {
405 		qmi_txn_cancel(&txn);
406 		ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
407 		goto out;
408 	}
409 
410 	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
411 	if (ret < 0)
412 		goto out;
413 
414 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
415 		ath10k_err(ar, "mode request rejected: %d\n", resp.resp.error);
416 		ret = -EINVAL;
417 		goto out;
418 	}
419 
420 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
421 	return 0;
422 
423 out:
424 	return ret;
425 }
426 
427 static int
428 ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
429 			     struct ath10k_qmi_wlan_enable_cfg *config,
430 			     const char *version)
431 {
432 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
433 	struct ath10k_qmi *qmi = ar_snoc->qmi;
434 	struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
435 	struct wlfw_wlan_cfg_req_msg_v01 *req;
436 	struct qmi_txn txn;
437 	int ret;
438 	u32 i;
439 
440 	req = kzalloc(sizeof(*req), GFP_KERNEL);
441 	if (!req)
442 		return -ENOMEM;
443 
444 	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
445 			   wlfw_wlan_cfg_resp_msg_v01_ei,
446 			   &resp);
447 	if (ret < 0)
448 		goto out;
449 
450 	req->host_version_valid = 0;
451 
452 	req->tgt_cfg_valid = 1;
453 	if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
454 		req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
455 	else
456 		req->tgt_cfg_len = config->num_ce_tgt_cfg;
457 	for (i = 0; i < req->tgt_cfg_len; i++) {
458 		req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
459 		req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
460 		req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
461 		req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
462 		req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
463 	}
464 
465 	req->svc_cfg_valid = 1;
466 	if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
467 		req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
468 	else
469 		req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
470 	for (i = 0; i < req->svc_cfg_len; i++) {
471 		req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
472 		req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
473 		req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
474 	}
475 
476 	req->shadow_reg_valid = 1;
477 	if (config->num_shadow_reg_cfg >
478 	    QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
479 		req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
480 	else
481 		req->shadow_reg_len = config->num_shadow_reg_cfg;
482 
483 	memcpy(req->shadow_reg, config->shadow_reg_cfg,
484 	       sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);
485 
486 	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
487 			       QMI_WLFW_WLAN_CFG_REQ_V01,
488 			       WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
489 			       wlfw_wlan_cfg_req_msg_v01_ei, req);
490 	if (ret < 0) {
491 		qmi_txn_cancel(&txn);
492 		ath10k_err(ar, "failed to send config request: %d\n", ret);
493 		goto out;
494 	}
495 
496 	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
497 	if (ret < 0)
498 		goto out;
499 
500 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
501 		ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
502 		ret = -EINVAL;
503 		goto out;
504 	}
505 
506 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
507 	kfree(req);
508 	return 0;
509 
510 out:
511 	kfree(req);
512 	return ret;
513 }
514 
515 int ath10k_qmi_wlan_enable(struct ath10k *ar,
516 			   struct ath10k_qmi_wlan_enable_cfg *config,
517 			   enum wlfw_driver_mode_enum_v01 mode,
518 			   const char *version)
519 {
520 	int ret;
521 
522 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
523 		   mode, config);
524 
525 	ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
526 	if (ret) {
527 		ath10k_err(ar, "failed to send qmi config: %d\n", ret);
528 		return ret;
529 	}
530 
531 	ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
532 	if (ret) {
533 		ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
534 		return ret;
535 	}
536 
537 	return 0;
538 }
539 
540 int ath10k_qmi_wlan_disable(struct ath10k *ar)
541 {
542 	return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
543 }
544 
545 static void ath10k_qmi_add_wlan_ver_smem(struct ath10k *ar, const char *fw_build_id)
546 {
547 	u8 *table_ptr;
548 	size_t smem_item_size;
549 	const u32 smem_img_idx_wlan = SMEM_IMAGE_TABLE_CNSS_INDEX *
550 				      SMEM_IMAGE_VERSION_ENTRY_SIZE;
551 
552 	table_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
553 				  SMEM_IMAGE_VERSION_TABLE,
554 				  &smem_item_size);
555 
556 	if (IS_ERR(table_ptr)) {
557 		ath10k_err(ar, "smem image version table not found\n");
558 		return;
559 	}
560 
561 	if (smem_img_idx_wlan + SMEM_IMAGE_VERSION_ENTRY_SIZE >
562 	    smem_item_size) {
563 		ath10k_err(ar, "smem block size too small: %zu\n",
564 			   smem_item_size);
565 		return;
566 	}
567 
568 	strscpy(table_ptr + smem_img_idx_wlan, fw_build_id,
569 		SMEM_IMAGE_VERSION_NAME_SIZE);
570 }
571 
572 static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
573 {
574 	struct wlfw_cap_resp_msg_v01 *resp;
575 	struct wlfw_cap_req_msg_v01 req = {};
576 	struct ath10k *ar = qmi->ar;
577 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
578 	struct qmi_txn txn;
579 	int ret;
580 
581 	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
582 	if (!resp)
583 		return -ENOMEM;
584 
585 	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
586 	if (ret < 0)
587 		goto out;
588 
589 	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
590 			       QMI_WLFW_CAP_REQ_V01,
591 			       WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
592 			       wlfw_cap_req_msg_v01_ei, &req);
593 	if (ret < 0) {
594 		qmi_txn_cancel(&txn);
595 		ath10k_err(ar, "failed to send capability request: %d\n", ret);
596 		goto out;
597 	}
598 
599 	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
600 	if (ret < 0)
601 		goto out;
602 
603 	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
604 		ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
605 		ret = -EINVAL;
606 		goto out;
607 	}
608 
609 	if (resp->chip_info_valid) {
610 		qmi->chip_info.chip_id = resp->chip_info.chip_id;
611 		qmi->chip_info.chip_family = resp->chip_info.chip_family;
612 	} else {
613 		qmi->chip_info.chip_id = 0xFF;
614 	}
615 
616 	if (resp->board_info_valid)
617 		qmi->board_info.board_id = resp->board_info.board_id;
618 	else
619 		qmi->board_info.board_id = 0xFF;
620 
621 	if (resp->soc_info_valid)
622 		qmi->soc_info.soc_id = resp->soc_info.soc_id;
623 
624 	if (resp->fw_version_info_valid) {
625 		qmi->fw_version = resp->fw_version_info.fw_version;
626 		strscpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
627 			sizeof(qmi->fw_build_timestamp));
628 	}
629 
630 	if (resp->fw_build_id_valid)
631 		strscpy(qmi->fw_build_id, resp->fw_build_id,
632 			MAX_BUILD_ID_LEN + 1);
633 
634 	if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
635 		ath10k_info(ar, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x",
636 			    qmi->chip_info.chip_id, qmi->chip_info.chip_family,
637 			    qmi->board_info.board_id, qmi->soc_info.soc_id);
638 		ath10k_info(ar, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
639 			    qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
640 	}
641 
642 	if (resp->fw_build_id_valid)
643 		ath10k_qmi_add_wlan_ver_smem(ar, qmi->fw_build_id);
644 
645 	kfree(resp);
646 	return 0;
647 
648 out:
649 	kfree(resp);
650 	return ret;
651 }
652 
653 static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
654 {
655 	struct wlfw_host_cap_resp_msg_v01 resp = {};
656 	struct wlfw_host_cap_req_msg_v01 req = {};
657 	const struct qmi_elem_info *req_ei;
658 	struct ath10k *ar = qmi->ar;
659 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
660 	struct qmi_txn txn;
661 	int ret;
662 
663 	req.daemon_support_valid = 1;
664 	req.daemon_support = 0;
665 
666 	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
667 			   &resp);
668 	if (ret < 0)
669 		goto out;
670 
671 	if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
672 		req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
673 	else
674 		req_ei = wlfw_host_cap_req_msg_v01_ei;
675 
676 	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
677 			       QMI_WLFW_HOST_CAP_REQ_V01,
678 			       WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
679 			       req_ei, &req);
680 	if (ret < 0) {
681 		qmi_txn_cancel(&txn);
682 		ath10k_err(ar, "failed to send host capability request: %d\n", ret);
683 		goto out;
684 	}
685 
686 	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
687 	if (ret < 0)
688 		goto out;
689 
690 	/* older FW didn't support this request, which is not fatal */
691 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
692 	    resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
693 		ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
694 		ret = -EINVAL;
695 		goto out;
696 	}
697 
698 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
699 	return 0;
700 
701 out:
702 	return ret;
703 }
704 
705 int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
706 {
707 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
708 	struct wlfw_ini_resp_msg_v01 resp = {};
709 	struct ath10k_qmi *qmi = ar_snoc->qmi;
710 	struct wlfw_ini_req_msg_v01 req = {};
711 	struct qmi_txn txn;
712 	int ret;
713 
714 	req.enablefwlog_valid = 1;
715 	req.enablefwlog = fw_log_mode;
716 
717 	ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_ini_resp_msg_v01_ei,
718 			   &resp);
719 	if (ret < 0)
720 		goto out;
721 
722 	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
723 			       QMI_WLFW_INI_REQ_V01,
724 			       WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
725 			       wlfw_ini_req_msg_v01_ei, &req);
726 	if (ret < 0) {
727 		qmi_txn_cancel(&txn);
728 		ath10k_err(ar, "failed to send fw log request: %d\n", ret);
729 		goto out;
730 	}
731 
732 	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
733 	if (ret < 0)
734 		goto out;
735 
736 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
737 		ath10k_err(ar, "fw log request rejected: %d\n",
738 			   resp.resp.error);
739 		ret = -EINVAL;
740 		goto out;
741 	}
742 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi fw log request completed, mode: %d\n",
743 		   fw_log_mode);
744 	return 0;
745 
746 out:
747 	return ret;
748 }
749 
750 static int
751 ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
752 {
753 	struct wlfw_ind_register_resp_msg_v01 resp = {};
754 	struct wlfw_ind_register_req_msg_v01 req = {};
755 	struct ath10k *ar = qmi->ar;
756 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
757 	struct qmi_txn txn;
758 	int ret;
759 
760 	req.client_id_valid = 1;
761 	req.client_id = ATH10K_QMI_CLIENT_ID;
762 	req.fw_ready_enable_valid = 1;
763 	req.fw_ready_enable = 1;
764 	req.msa_ready_enable_valid = 1;
765 	req.msa_ready_enable = 1;
766 
767 	if (ar_snoc->xo_cal_supported) {
768 		req.xo_cal_enable_valid = 1;
769 		req.xo_cal_enable = 1;
770 	}
771 
772 	ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
773 			   wlfw_ind_register_resp_msg_v01_ei, &resp);
774 	if (ret < 0)
775 		goto out;
776 
777 	ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
778 			       QMI_WLFW_IND_REGISTER_REQ_V01,
779 			       WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
780 			       wlfw_ind_register_req_msg_v01_ei, &req);
781 	if (ret < 0) {
782 		qmi_txn_cancel(&txn);
783 		ath10k_err(ar, "failed to send indication register request: %d\n", ret);
784 		goto out;
785 	}
786 
787 	ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
788 	if (ret < 0)
789 		goto out;
790 
791 	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
792 		ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
793 		ret = -EINVAL;
794 		goto out;
795 	}
796 
797 	if (resp.fw_status_valid) {
798 		if (resp.fw_status & QMI_WLFW_FW_READY_V01)
799 			qmi->fw_ready = true;
800 	}
801 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
802 	return 0;
803 
804 out:
805 	return ret;
806 }
807 
808 static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
809 {
810 	struct ath10k *ar = qmi->ar;
811 	int ret;
812 
813 	ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
814 	if (ret)
815 		return;
816 
817 	if (qmi->fw_ready) {
818 		ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
819 		return;
820 	}
821 
822 	ret = ath10k_qmi_host_cap_send_sync(qmi);
823 	if (ret)
824 		return;
825 
826 	ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
827 	if (ret)
828 		return;
829 
830 	/*
831 	 * HACK: sleep for a while between receiving the msa info response
832 	 * and the XPU update to prevent SDM845 from crashing due to a security
833 	 * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
834 	 */
835 	msleep(20);
836 
837 	ret = ath10k_qmi_setup_msa_permissions(qmi);
838 	if (ret)
839 		return;
840 
841 	ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
842 	if (ret)
843 		goto err_setup_msa;
844 
845 	ret = ath10k_qmi_cap_send_sync_msg(qmi);
846 	if (ret)
847 		goto err_setup_msa;
848 
849 	return;
850 
851 err_setup_msa:
852 	ath10k_qmi_remove_msa_permission(qmi);
853 }
854 
855 static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
856 {
857 	struct ath10k *ar = qmi->ar;
858 	int ret;
859 
860 	ar->hif.bus = ATH10K_BUS_SNOC;
861 	ar->id.qmi_ids_valid = true;
862 	ar->id.qmi_board_id = qmi->board_info.board_id;
863 	ar->id.qmi_chip_id = qmi->chip_info.chip_id;
864 	ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;
865 
866 	ret = ath10k_core_check_dt(ar);
867 	if (ret)
868 		ath10k_dbg(ar, ATH10K_DBG_QMI, "DT bdf variant name not set.\n");
869 
870 	return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
871 }
872 
873 static int
874 ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
875 			     enum ath10k_qmi_driver_event_type type,
876 			     void *data)
877 {
878 	struct ath10k_qmi_driver_event *event;
879 
880 	event = kzalloc(sizeof(*event), GFP_ATOMIC);
881 	if (!event)
882 		return -ENOMEM;
883 
884 	event->type = type;
885 	event->data = data;
886 
887 	spin_lock(&qmi->event_lock);
888 	list_add_tail(&event->list, &qmi->event_list);
889 	spin_unlock(&qmi->event_lock);
890 
891 	queue_work(qmi->event_wq, &qmi->event_work);
892 
893 	return 0;
894 }
895 
896 static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
897 {
898 	struct ath10k *ar = qmi->ar;
899 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
900 
901 	ath10k_qmi_remove_msa_permission(qmi);
902 	ath10k_core_free_board_files(ar);
903 	if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags) &&
904 	    !test_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags))
905 		ath10k_snoc_fw_crashed_dump(ar);
906 
907 	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
908 	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
909 }
910 
911 static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
912 {
913 	int ret;
914 
915 	ret = ath10k_qmi_fetch_board_file(qmi);
916 	if (ret)
917 		goto out;
918 
919 	ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
920 	if (ret)
921 		goto out;
922 
923 	ret = ath10k_qmi_send_cal_report_req(qmi);
924 
925 out:
926 	return;
927 }
928 
929 static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
930 {
931 	struct ath10k *ar = qmi->ar;
932 
933 	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
934 	ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
935 
936 	return 0;
937 }
938 
939 static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
940 				    struct sockaddr_qrtr *sq,
941 				    struct qmi_txn *txn, const void *data)
942 {
943 	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
944 
945 	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
946 }
947 
948 static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
949 				     struct sockaddr_qrtr *sq,
950 				     struct qmi_txn *txn, const void *data)
951 {
952 	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
953 
954 	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
955 }
956 
957 static const struct qmi_msg_handler qmi_msg_handler[] = {
958 	{
959 		.type = QMI_INDICATION,
960 		.msg_id = QMI_WLFW_FW_READY_IND_V01,
961 		.ei = wlfw_fw_ready_ind_msg_v01_ei,
962 		.decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
963 		.fn = ath10k_qmi_fw_ready_ind,
964 	},
965 	{
966 		.type = QMI_INDICATION,
967 		.msg_id = QMI_WLFW_MSA_READY_IND_V01,
968 		.ei = wlfw_msa_ready_ind_msg_v01_ei,
969 		.decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
970 		.fn = ath10k_qmi_msa_ready_ind,
971 	},
972 	{}
973 };
974 
975 static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
976 				 struct qmi_service *service)
977 {
978 	struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
979 	struct sockaddr_qrtr *sq = &qmi->sq;
980 	struct ath10k *ar = qmi->ar;
981 	int ret;
982 
983 	sq->sq_family = AF_QIPCRTR;
984 	sq->sq_node = service->node;
985 	sq->sq_port = service->port;
986 
987 	ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");
988 
989 	ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
990 			     sizeof(qmi->sq), 0);
991 	if (ret) {
992 		ath10k_err(ar, "failed to connect to a remote QMI service port\n");
993 		return ret;
994 	}
995 
996 	ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
997 	ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);
998 
999 	return ret;
1000 }
1001 
1002 static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
1003 				  struct qmi_service *service)
1004 {
1005 	struct ath10k_qmi *qmi =
1006 		container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
1007 
1008 	qmi->fw_ready = false;
1009 
1010 	/*
1011 	 * The del_server event is to be processed only if coming from
1012 	 * the qmi server. The qmi infrastructure sends del_server, when
1013 	 * any client releases the qmi handle. In this case do not process
1014 	 * this del_server event.
1015 	 */
1016 	if (qmi->state == ATH10K_QMI_STATE_INIT_DONE)
1017 		ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT,
1018 					     NULL);
1019 }
1020 
1021 static const struct qmi_ops ath10k_qmi_ops = {
1022 	.new_server = ath10k_qmi_new_server,
1023 	.del_server = ath10k_qmi_del_server,
1024 };
1025 
1026 static void ath10k_qmi_driver_event_work(struct work_struct *work)
1027 {
1028 	struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
1029 					      event_work);
1030 	struct ath10k_qmi_driver_event *event;
1031 	struct ath10k *ar = qmi->ar;
1032 
1033 	spin_lock(&qmi->event_lock);
1034 	while (!list_empty(&qmi->event_list)) {
1035 		event = list_first_entry(&qmi->event_list,
1036 					 struct ath10k_qmi_driver_event, list);
1037 		list_del(&event->list);
1038 		spin_unlock(&qmi->event_lock);
1039 
1040 		switch (event->type) {
1041 		case ATH10K_QMI_EVENT_SERVER_ARRIVE:
1042 			ath10k_qmi_event_server_arrive(qmi);
1043 			if (qmi->no_msa_ready_indicator) {
1044 				ath10k_info(ar, "qmi not waiting for msa_ready indicator");
1045 				ath10k_qmi_event_msa_ready(qmi);
1046 			}
1047 			break;
1048 		case ATH10K_QMI_EVENT_SERVER_EXIT:
1049 			ath10k_qmi_event_server_exit(qmi);
1050 			break;
1051 		case ATH10K_QMI_EVENT_FW_READY_IND:
1052 			ath10k_qmi_event_fw_ready_ind(qmi);
1053 			break;
1054 		case ATH10K_QMI_EVENT_MSA_READY_IND:
1055 			if (qmi->no_msa_ready_indicator) {
1056 				ath10k_warn(ar, "qmi unexpected msa_ready indicator");
1057 				break;
1058 			}
1059 			ath10k_qmi_event_msa_ready(qmi);
1060 			break;
1061 		default:
1062 			ath10k_warn(ar, "invalid event type: %d", event->type);
1063 			break;
1064 		}
1065 		kfree(event);
1066 		spin_lock(&qmi->event_lock);
1067 	}
1068 	spin_unlock(&qmi->event_lock);
1069 }
1070 
1071 int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
1072 {
1073 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1074 	struct device *dev = ar->dev;
1075 	struct ath10k_qmi *qmi;
1076 	int ret;
1077 
1078 	qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
1079 	if (!qmi)
1080 		return -ENOMEM;
1081 
1082 	qmi->ar = ar;
1083 	ar_snoc->qmi = qmi;
1084 
1085 	if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
1086 		qmi->msa_fixed_perm = true;
1087 
1088 	if (of_property_read_bool(dev->of_node, "qcom,no-msa-ready-indicator"))
1089 		qmi->no_msa_ready_indicator = true;
1090 
1091 	ret = qmi_handle_init(&qmi->qmi_hdl,
1092 			      WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
1093 			      &ath10k_qmi_ops, qmi_msg_handler);
1094 	if (ret)
1095 		goto err;
1096 
1097 	qmi->event_wq = alloc_ordered_workqueue("ath10k_qmi_driver_event", 0);
1098 	if (!qmi->event_wq) {
1099 		ath10k_err(ar, "failed to allocate workqueue\n");
1100 		ret = -EFAULT;
1101 		goto err_release_qmi_handle;
1102 	}
1103 
1104 	INIT_LIST_HEAD(&qmi->event_list);
1105 	spin_lock_init(&qmi->event_lock);
1106 	INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);
1107 
1108 	ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
1109 			     WLFW_SERVICE_VERS_V01, 0);
1110 	if (ret)
1111 		goto err_qmi_lookup;
1112 
1113 	qmi->state = ATH10K_QMI_STATE_INIT_DONE;
1114 	return 0;
1115 
1116 err_qmi_lookup:
1117 	destroy_workqueue(qmi->event_wq);
1118 
1119 err_release_qmi_handle:
1120 	qmi_handle_release(&qmi->qmi_hdl);
1121 
1122 err:
1123 	kfree(qmi);
1124 	return ret;
1125 }
1126 
1127 int ath10k_qmi_deinit(struct ath10k *ar)
1128 {
1129 	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
1130 	struct ath10k_qmi *qmi = ar_snoc->qmi;
1131 
1132 	qmi->state = ATH10K_QMI_STATE_DEINIT;
1133 	qmi_handle_release(&qmi->qmi_hdl);
1134 	cancel_work_sync(&qmi->event_work);
1135 	destroy_workqueue(qmi->event_wq);
1136 	kfree(qmi);
1137 	ar_snoc->qmi = NULL;
1138 
1139 	return 0;
1140 }
1141