Lines Matching defs:ab
(the hits below appear to come from the ath12k QMI driver, drivers/net/wireless/ath/ath12k/qmi.c; the number at the start of each line is the source line of that occurrence of the struct ath12k_base pointer "ab")

2023 struct ath12k_base *ab, *partner_ab;
2028 ab = ag->ab[i];
2031 partner_ab = ag->ab[j];
2033 if (partner_ab->wsi_info.index >= ab->wsi_info.index)
2039 ab->wsi_info.hw_link_id_base = hw_id_base;
2045 static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
2049 struct ath12k_hw_group *ag = ab->ag;
2055 ath12k_dbg(ab, ATH12K_DBG_QMI,
2060 if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
2062 ath12k_dbg(ab, ATH12K_DBG_QMI,
2064 ab->qmi.num_radios);
2068 if (ab->device_id == ATH12K_INVALID_DEVICE_ID) {
2069 ath12k_err(ab, "failed to send MLO cap due to invalid device id\n");
2076 req->mlo_chip_id = ab->device_id;
2083 req->max_mlo_peer = ab->hw_params->max_mlo_peer;
2087 ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo capability advertisement device_id %d group_id %d num_devices %d",
2097 partner_ab = ag->ab[i];
2100 ath12k_err(ab, "failed to send MLO cap due to invalid partner device id\n");
2108 ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo device id %d num_link %d\n",
2115 ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo hw_link_id %d\n",
2159 int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
2168 req.mem_cfg_mode = ab->qmi.target_mem_mode;
2173 if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_driver) {
2181 req.cal_done = ab->qmi.cal_done;
2183 if (ab->hw_params->qmi_cnss_feature_bitmap) {
2185 req.feature_list = ab->hw_params->qmi_cnss_feature_bitmap;
2191 if (ab->hw_params->internal_sleep_clock) {
2206 ret = ath12k_host_cap_parse_mlo(ab, &req);
2210 ret = qmi_txn_init(&ab->qmi.handle, &txn,
2215 ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
2221 ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
2230 ath12k_warn(ab, "Host capability request failed, result: %d, err: %d\n",
2240 static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
2247 ret = qmi_txn_init(&ab->qmi.handle, &txn,
2252 ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
2258 ath12k_warn(ab, "failed to send phy capability request: %d\n", ret);
2272 ab->single_chip_mlo_support = true;
2279 ab->qmi.num_radios = resp.num_phy;
2281 ath12k_dbg(ab, ATH12K_DBG_QMI,
2291 ab->qmi.num_radios = ab->hw_params->def_num_link;
2293 ath12k_dbg(ab, ATH12K_DBG_QMI,
2295 ab->qmi.num_radios);
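The phy-capability exchange at 2240-2295 is what seeds ab->qmi.num_radios: on a valid response it takes resp.num_phy (2279), and when the request or response fails it falls back to the hw_params default (2291); when the response advertises it, ab->single_chip_mlo_support is also set (2272). Roughly, as a condensed sketch (the local "resp"/"ret" names and the exact validity checks are assumptions, not shown in the hits above):

    /* Condensed from 2258-2295; "resp" is the QMI response (type assumed),
     * "ret" the send/response status, validity checks elided.
     */
    if (!ret)
            ab->qmi.num_radios = resp.num_phy;                /* 2279 */
    else
            ab->qmi.num_radios = ab->hw_params->def_num_link; /* 2291: no valid response */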
2298 static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
2302 struct qmi_handle *handle = &ab->qmi.handle;
2337 ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
2343 ath12k_warn(ab, "Failed to send indication register request, err = %d\n",
2350 ath12k_warn(ab, "failed to register fw indication %d\n", ret);
2355 ath12k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n",
2370 int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
2387 if (!test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags) &&
2388 ab->qmi.target_mem_delayed) {
2390 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
2391 ab->qmi.mem_seg_count);
2394 req->mem_seg_len = ab->qmi.mem_seg_count;
2396 req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
2397 req->mem_seg[i].size = ab->qmi.target_mem[i].size;
2398 req->mem_seg[i].type = ab->qmi.target_mem[i].type;
2399 ath12k_dbg(ab, ATH12K_DBG_QMI,
2401 &ab->qmi.target_mem[i].paddr,
2402 ab->qmi.target_mem[i].size,
2403 ab->qmi.target_mem[i].type);
2407 ret = qmi_txn_init(&ab->qmi.handle, &txn,
2412 ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
2418 ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n",
2425 ath12k_warn(ab, "qmi failed memory request, err = %d\n", ret);
2436 ath12k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
2468 static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
2472 struct ath12k_hw_group *ag = ab->ag;
2482 ath12k_warn(ab, "invalid index for MLO memory chunk free: %d\n", idx);
2486 fixed_mem = test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);
2493 dma_free_coherent(ab->dev,
2510 static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
2512 struct ath12k_hw_group *ag = ab->ag;
2515 for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
2516 if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
2517 ath12k_qmi_free_mlo_mem_chunk(ab,
2518 &ab->qmi.target_mem[i],
2521 if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags) &&
2522 ab->qmi.target_mem[i].v.ioaddr) {
2523 iounmap(ab->qmi.target_mem[i].v.ioaddr);
2524 ab->qmi.target_mem[i].v.ioaddr = NULL;
2526 if (!ab->qmi.target_mem[i].v.addr)
2528 dma_free_coherent(ab->dev,
2529 ab->qmi.target_mem[i].prev_size,
2530 ab->qmi.target_mem[i].v.addr,
2531 ab->qmi.target_mem[i].paddr);
2532 ab->qmi.target_mem[i].v.addr = NULL;
2543 static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab,
2555 dma_free_coherent(ab->dev, chunk->prev_size,
2560 chunk->v.addr = dma_alloc_coherent(ab->dev,
2566 ab->qmi.target_mem_delayed = true;
2567 ath12k_warn(ab,
2571 ath12k_qmi_free_target_mem_chunk(ab);
2574 ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
2584 static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
2587 struct ath12k_hw_group *ag = ab->ag;
2598 ab->qmi.target_mem_delayed = false;
2600 for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
2601 chunk = &ab->qmi.target_mem[i];
2612 ret = ath12k_qmi_alloc_chunk(ab, chunk);
2620 ath12k_err(ab, "QMI MLO memory allocation failure, requested size %d is more than allocated size %d",
2629 ath12k_err(ab, "QMI MLO chunk memory allocation failure for index %d, requested size %d is more than allocated size %d",
2637 ret = ath12k_qmi_alloc_chunk(ab, mlo_chunk);
2649 ath12k_warn(ab, "memory type %u not supported\n",
2660 ath12k_err(ab, "QMI MLO memory size error, expected size is %d but requested size is %d",
2671 ath12k_qmi_free_target_mem_chunk(ab);
2686 static int ath12k_qmi_assign_target_mem_chunk(struct ath12k_base *ab)
2692 for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
2693 switch (ab->qmi.target_mem[i].type) {
2695 rmem = ath12k_core_get_reserved_mem(ab, 0);
2702 if (avail_rmem_size < ab->qmi.target_mem[i].size) {
2703 ath12k_dbg(ab, ATH12K_DBG_QMI,
2705 ab->qmi.target_mem[i].type,
2706 ab->qmi.target_mem[i].size,
2712 ab->qmi.target_mem[idx].paddr = rmem->base;
2713 ab->qmi.target_mem[idx].v.ioaddr =
2714 ioremap(ab->qmi.target_mem[idx].paddr,
2715 ab->qmi.target_mem[i].size);
2716 if (!ab->qmi.target_mem[idx].v.ioaddr) {
2720 ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
2721 ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
2725 rmem = ath12k_core_get_reserved_mem(ab, 0);
2731 avail_rmem_size = rmem->size - ab->hw_params->bdf_addr_offset;
2732 if (avail_rmem_size < ab->qmi.target_mem[i].size) {
2733 ath12k_dbg(ab, ATH12K_DBG_QMI,
2735 ab->qmi.target_mem[i].type,
2736 ab->qmi.target_mem[i].size,
2741 ab->qmi.target_mem[idx].paddr =
2742 rmem->base + ab->hw_params->bdf_addr_offset;
2743 ab->qmi.target_mem[idx].v.ioaddr =
2744 ioremap(ab->qmi.target_mem[idx].paddr,
2745 ab->qmi.target_mem[i].size);
2746 if (!ab->qmi.target_mem[idx].v.ioaddr) {
2750 ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
2751 ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
2760 ab->qmi.target_mem[idx].paddr = 0;
2761 ab->qmi.target_mem[idx].v.ioaddr = NULL;
2762 ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
2763 ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
2767 rmem = ath12k_core_get_reserved_mem(ab, 1);
2774 if (avail_rmem_size < ab->qmi.target_mem[i].size) {
2775 ath12k_dbg(ab, ATH12K_DBG_QMI,
2777 ab->qmi.target_mem[i].type,
2778 ab->qmi.target_mem[i].size,
2784 ab->qmi.target_mem[idx].paddr = rmem->base;
2785 ab->qmi.target_mem[idx].v.ioaddr =
2786 ioremap(ab->qmi.target_mem[idx].paddr,
2787 ab->qmi.target_mem[i].size);
2788 if (!ab->qmi.target_mem[idx].v.ioaddr) {
2792 ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
2793 ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
2797 ath12k_warn(ab, "qmi ignore invalid mem req type %u\n",
2798 ab->qmi.target_mem[i].type);
2802 ab->qmi.mem_seg_count = idx;
2806 ath12k_qmi_free_target_mem_chunk(ab);
2812 int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
2822 ret = qmi_txn_init(&ab->qmi.handle, &txn,
2827 ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
2833 ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n",
2840 ath12k_warn(ab, "qmi failed target cap request %d\n", ret);
2845 ath12k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n",
2852 ab->qmi.target.chip_id = resp.chip_info.chip_id;
2853 ab->qmi.target.chip_family = resp.chip_info.chip_family;
2857 ab->qmi.target.board_id = resp.board_info.board_id;
2859 ab->qmi.target.board_id = board_id;
2862 ab->qmi.target.soc_id = resp.soc_info.soc_id;
2865 ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
2866 strscpy(ab->qmi.target.fw_build_timestamp,
2868 sizeof(ab->qmi.target.fw_build_timestamp));
2872 strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
2873 sizeof(ab->qmi.target.fw_build_id));
2877 ab->qmi.dev_mem[i].start =
2879 ab->qmi.dev_mem[i].size =
2881 ath12k_dbg(ab, ATH12K_DBG_QMI,
2883 ab->qmi.dev_mem[i].start,
2884 ab->qmi.dev_mem[i].size);
2889 ab->qmi.target.eeprom_caldata = resp.eeprom_caldata_read_timeout;
2890 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi cal data supported from eeprom\n");
2893 ath12k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
2894 ab->qmi.target.chip_id, ab->qmi.target.chip_family,
2895 ab->qmi.target.board_id, ab->qmi.target.soc_id);
2897 ath12k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
2898 ab->qmi.target.fw_version,
2899 ab->qmi.target.fw_build_timestamp,
2900 ab->qmi.target.fw_build_id);
2902 r = ath12k_core_check_smbios(ab);
2904 ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
2906 r = ath12k_acpi_start(ab);
2909 ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", r);
2911 r = ath12k_acpi_check_bdf_variant_name(ab);
2913 ath12k_dbg(ab, ATH12K_DBG_BOOT, "ACPI bdf variant name not set.\n");
2919 static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
2936 req->file_id = ab->qmi.target.board_id;
2961 ret = qmi_txn_init(&ab->qmi.handle, &txn,
2967 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi bdf download req fixed addr type %d\n",
2970 ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
2984 ath12k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
2996 ath12k_dbg(ab, ATH12K_DBG_QMI,
3009 int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
3012 struct device *dev = ab->dev;
3024 ret = ath12k_core_fetch_bdf(ab, &bd);
3026 ath12k_warn(ab, "qmi failed to load bdf:\n");
3037 ret = ath12k_core_fetch_regdb(ab, &bd);
3039 ath12k_warn(ab, "qmi failed to load regdb bin:\n");
3045 if (ab->qmi.target.eeprom_caldata) {
3054 ath12k_bus_str(ab->hif.bus), dev_name(dev));
3055 fw_entry = ath12k_core_firmware_request(ab, filename);
3059 fw_entry = ath12k_core_firmware_request(ab,
3063 ath12k_warn(ab,
3070 fw_size = min_t(u32, ab->hw_params->fw.board_size,
3074 ret = ath12k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
3076 ath12k_warn(ab, "qmi failed to load caldata\n");
3080 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi caldata downloaded: type: %u\n",
3084 if (!ab->qmi.target.eeprom_caldata)
3088 ath12k_warn(ab, "unknown file type for load %d", type);
3092 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi bdf_type %d\n", type);
3094 fw_size = min_t(u32, ab->hw_params->fw.board_size, bd.len);
3096 ret = ath12k_qmi_load_file_target_mem(ab, bd.data, fw_size, type);
3098 ath12k_warn(ab, "qmi failed to load bdf file\n");
3101 ath12k_core_free_bdf(ab, &bd);
3102 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi BDF download sequence completed\n");
3107 static void ath12k_qmi_m3_free(struct ath12k_base *ab)
3109 struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
3111 if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_remoteproc)
3117 dma_free_coherent(ab->dev, m3_mem->size,
3123 static int ath12k_qmi_m3_load(struct ath12k_base *ab)
3125 struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
3132 if (ab->fw.m3_data && ab->fw.m3_len > 0) {
3134 m3_data = ab->fw.m3_data;
3135 m3_len = ab->fw.m3_len;
3140 fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
3143 ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
3145 ath12k_err(ab, "failed to load %s: %d\n", path, ret);
3159 ath12k_qmi_m3_free(ab);
3162 m3_mem->vaddr = dma_alloc_coherent(ab->dev,
3166 ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n",
3186 int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
3188 struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
3194 if (ab->hw_params->fw.m3_loader == ath12k_m3_fw_loader_driver) {
3195 ret = ath12k_qmi_m3_load(ab);
3197 ath12k_err(ab, "failed to load m3 firmware: %d", ret);
3204 ret = qmi_txn_init(&ab->qmi.handle, &txn,
3209 ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
3215 ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
3222 ath12k_warn(ab, "qmi failed M3 information request %d\n", ret);
3227 ath12k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n",
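Taken together, 3107-3227 describe the M3 hand-off when hw_params->fw.m3_loader is ath12k_m3_fw_loader_driver: the blob is taken from the firmware bundle (ab->fw.m3_data, 3132) or requested separately as ATH12K_M3_FILE (3140), copied into a coherent DMA buffer, and the buffer's address and size are then reported to the target in the M3-info request (3186 onwards). A hedged outline of the load step, assuming m3_mem also carries a paddr field alongside the vaddr/size shown above and that GFP_KERNEL is used:

    /* Outline only, condensed from the hits above; error handling and the
     * struct firmware release are elided, and field names beyond
     * m3_mem->vaddr/size are assumptions.
     */
    if (ab->fw.m3_data && ab->fw.m3_len > 0) {          /* 3132: M3 shipped in fw bundle */
            m3_data = ab->fw.m3_data;
            m3_len = ab->fw.m3_len;
    } else {                                            /* 3140: separate ATH12K_M3_FILE */
            fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
            m3_data = fw->data;
            m3_len = fw->size;
    }

    m3_mem->vaddr = dma_alloc_coherent(ab->dev, m3_len,
                                       &m3_mem->paddr, GFP_KERNEL); /* 3162 */
    memcpy(m3_mem->vaddr, m3_data, m3_len);
    m3_mem->size = m3_len;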
3236 static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
3248 ret = qmi_txn_init(&ab->qmi.handle, &txn,
3253 ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
3259 ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
3267 ath12k_warn(ab, "WLFW service is dis-connected\n");
3270 ath12k_warn(ab, "qmi failed set mode request, mode: %d, err = %d\n",
3276 ath12k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n",
3286 static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
3295 ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
3296 svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map;
3308 req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
3319 req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
3327 if (ab->hw_params->supports_shadow_regs) {
3330 ab->qmi.ce_cfg.shadow_reg_v3_len,
3332 memcpy(&req->shadow_reg_v3, ab->qmi.ce_cfg.shadow_reg_v3,
3338 ret = qmi_txn_init(&ab->qmi.handle, &txn,
3343 ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
3349 ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
3356 ath12k_warn(ab, "qmi failed wlan config request, err = %d\n", ret);
3361 ath12k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n",
3372 static int ath12k_qmi_wlanfw_wlan_ini_send(struct ath12k_base *ab)
3382 ret = qmi_txn_init(&ab->qmi.handle, &txn,
3387 ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
3393 ath12k_warn(ab, "failed to send QMI wlan ini request: %d\n",
3400 ath12k_warn(ab, "failed to receive QMI wlan ini request: %d\n", ret);
3405 ath12k_warn(ab, "QMI wlan ini response failure: %d %d\n",
3415 void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
3419 clear_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags);
3421 ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
3423 ath12k_warn(ab, "qmi failed to send wlan mode off\n");
3428 int ath12k_qmi_firmware_start(struct ath12k_base *ab,
3433 ret = ath12k_qmi_wlanfw_wlan_ini_send(ab);
3435 ath12k_warn(ab, "qmi failed to send wlan fw ini: %d\n", ret);
3439 ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab);
3441 ath12k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
3445 ret = ath12k_qmi_wlanfw_mode_send(ab, mode);
3447 ath12k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
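Lines 3428-3447 pin down the bring-up order: wlan ini, then wlan config, then the firmware mode, warning and returning on the first failure. A minimal sketch of that sequence (illustrative function name; the exact type of the mode parameter is assumed here as u32):

    /* Sketch of the call order in ath12k_qmi_firmware_start(), not the
     * kernel function verbatim; "mode" is one of the ATH12K_FIRMWARE_MODE_*
     * values (ATH12K_FIRMWARE_MODE_OFF is visible at 3421).
     */
    static int qmi_firmware_start_order(struct ath12k_base *ab, u32 mode)
    {
            int ret;

            ret = ath12k_qmi_wlanfw_wlan_ini_send(ab);    /* 3433 */
            if (ret < 0)
                    return ret;

            ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab);    /* 3439 */
            if (ret < 0)
                    return ret;

            return ath12k_qmi_wlanfw_mode_send(ab, mode); /* 3445 */
    }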
3477 void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab)
3479 struct ath12k_qmi *qmi = &ab->qmi;
3488 ath12k_dbg(ab, ATH12K_DBG_QMI, "trigger host cap for device id %d\n",
3489 ab->device_id);
3496 struct ath12k_base *ab;
3500 ab = ag->ab[i];
3502 if (!(ab && ab->qmi.num_radios != U8_MAX))
3511 struct ath12k_base *ab;
3517 ab = ag->ab[i];
3518 if (!ab)
3521 spin_lock(&ab->qmi.event_lock);
3523 if (ath12k_qmi_get_event_block(&ab->qmi)) {
3524 spin_unlock(&ab->qmi.event_lock);
3525 return ab;
3528 spin_unlock(&ab->qmi.event_lock);
3538 struct ath12k_base *ab = qmi->ab, *block_ab;
3539 struct ath12k_hw_group *ag = ab->ag;
3542 ath12k_qmi_phy_cap_send(ab);
3544 ret = ath12k_qmi_fw_ind_register_send(ab);
3546 ath12k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret);
3575 struct ath12k_base *ab = qmi->ab;
3578 ret = ath12k_qmi_respond_fw_mem_request(ab);
3580 ath12k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret);
3591 struct ath12k_base *ab = qmi->ab;
3594 ret = ath12k_qmi_request_target_cap(ab);
3596 ath12k_warn(ab, "qmi failed to req target capabilities:%d\n", ret);
3600 ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_REGDB);
3602 ath12k_warn(ab, "qmi failed to load regdb file:%d\n", ret);
3606 ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_ELF);
3608 ath12k_warn(ab, "qmi failed to load board data file:%d\n", ret);
3612 if (ab->hw_params->download_calib) {
3613 ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_CALIBRATION);
3615 ath12k_warn(ab, "qmi failed to load calibrated data :%d\n", ret);
3618 ret = ath12k_qmi_wlanfw_m3_info_send(ab);
3620 ath12k_warn(ab, "qmi failed to send m3 info req:%d\n", ret);
3633 struct ath12k_base *ab = qmi->ab;
3637 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware request memory request\n");
3641 ath12k_warn(ab, "Invalid memory segment length: %u\n",
3644 ab->qmi.mem_seg_count = msg->mem_seg_len;
3647 ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
3648 ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
3649 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi mem seg type %d size %d\n",
3653 if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags)) {
3654 ret = ath12k_qmi_assign_target_mem_chunk(ab);
3656 ath12k_warn(ab, "failed to assign qmi target memory: %d\n",
3661 ret = ath12k_qmi_alloc_target_mem_chunk(ab);
3663 ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
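Scattered across the listing, the firmware's request-memory indication (3633-3663), the allocation paths (2543-2686 and 2686-2806), and the reply (2370-2436) form one handshake: record the segment list, back it with reserved regions or DMA memory, then answer the firmware. A condensed sketch of the handler's core, with the indication message ("msg") and the surrounding declarations assumed:

    /* Condensed from 3644-3663; error paths are elided and "msg" is the
     * QMI request-memory indication (type assumed).
     */
    ab->qmi.mem_seg_count = msg->mem_seg_len;                  /* 3644 */
    for (i = 0; i < ab->qmi.mem_seg_count; i++) {
            ab->qmi.target_mem[i].type = msg->mem_seg[i].type; /* 3647 */
            ab->qmi.target_mem[i].size = msg->mem_seg[i].size; /* 3648 */
    }

    if (test_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags))
            ret = ath12k_qmi_assign_target_mem_chunk(ab); /* 3654: ioremap() reserved regions */
    else
            ret = ath12k_qmi_alloc_target_mem_chunk(ab);  /* 3661: dma_alloc_coherent() chunks */

    /* the reply itself goes out later via ath12k_qmi_respond_fw_mem_request() (2370) */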
3678 struct ath12k_base *ab = qmi->ab;
3680 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware memory ready indication\n");
3690 struct ath12k_base *ab = qmi->ab;
3692 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware ready\n");
3727 struct ath12k_base *ab = qmi->ab;
3738 ath12k_warn(ab, "qmi failed to connect to remote service %d\n", ret);
3742 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi wifi fw qmi service connected\n");
3752 struct ath12k_base *ab = qmi->ab;
3754 ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi wifi fw del server\n");
3765 struct ath12k_base *ab = qmi->ab;
3768 ret = ath12k_qmi_host_cap_send(ab);
3770 ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n",
3771 ab->device_id, ret);
3783 struct ath12k_base *ab = qmi->ab;
3793 if (test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))
3800 set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
3803 set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
3808 set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
3813 set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
3816 clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
3817 if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags)) {
3818 if (ab->is_reset)
3819 ath12k_hal_dump_srng_stats(ab);
3821 set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
3822 queue_work(ab->workqueue, &ab->restart_work);
3827 &ab->dev_flags);
3828 ret = ath12k_core_qmi_firmware_ready(ab);
3831 &ab->dev_flags);
3837 set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
3840 ath12k_warn(ab, "invalid event type: %d", event->type);
3851 int ath12k_qmi_init_service(struct ath12k_base *ab)
3855 memset(&ab->qmi.target, 0, sizeof(struct target_info));
3856 memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk));
3857 ab->qmi.ab = ab;
3859 ab->qmi.target_mem_mode = ATH12K_QMI_TARGET_MEM_MODE_DEFAULT;
3860 ret = qmi_handle_init(&ab->qmi.handle, ATH12K_QMI_RESP_LEN_MAX,
3863 ath12k_warn(ab, "failed to initialize qmi handle\n");
3867 ab->qmi.event_wq = alloc_ordered_workqueue("ath12k_qmi_driver_event", 0);
3868 if (!ab->qmi.event_wq) {
3869 ath12k_err(ab, "failed to allocate workqueue\n");
3873 INIT_LIST_HEAD(&ab->qmi.event_list);
3874 spin_lock_init(&ab->qmi.event_lock);
3875 INIT_WORK(&ab->qmi.event_work, ath12k_qmi_driver_event_work);
3877 ret = qmi_add_lookup(&ab->qmi.handle, ATH12K_QMI_WLFW_SERVICE_ID_V01,
3879 ab->qmi.service_ins_id);
3881 ath12k_warn(ab, "failed to add qmi lookup\n");
3882 destroy_workqueue(ab->qmi.event_wq);
3889 void ath12k_qmi_deinit_service(struct ath12k_base *ab)
3891 if (!ab->qmi.ab)
3894 qmi_handle_release(&ab->qmi.handle);
3895 cancel_work_sync(&ab->qmi.event_work);
3896 destroy_workqueue(ab->qmi.event_wq);
3897 ath12k_qmi_m3_free(ab);
3898 ath12k_qmi_free_target_mem_chunk(ab);
3899 ab->qmi.ab = NULL;
3902 void ath12k_qmi_free_resource(struct ath12k_base *ab)
3904 ath12k_qmi_free_target_mem_chunk(ab);
3905 ath12k_qmi_m3_free(ab);