xref: /linux/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
3 
4 #include <linux/device.h>
5 
6 #include "hinic3_hw_cfg.h"
7 #include "hinic3_hwdev.h"
8 #include "hinic3_hwif.h"
9 #include "hinic3_mbox.h"
10 
11 #define HINIC3_CFG_MAX_QP  256
12 
hinic3_parse_pub_res_cap(struct hinic3_hwdev * hwdev,struct hinic3_dev_cap * cap,const struct cfg_cmd_dev_cap * dev_cap,enum hinic3_func_type type)13 static void hinic3_parse_pub_res_cap(struct hinic3_hwdev *hwdev,
14 				     struct hinic3_dev_cap *cap,
15 				     const struct cfg_cmd_dev_cap *dev_cap,
16 				     enum hinic3_func_type type)
17 {
18 	cap->port_id = dev_cap->port_id;
19 	cap->supp_svcs_bitmap = dev_cap->svc_cap_en;
20 }
21 
hinic3_parse_l2nic_res_cap(struct hinic3_hwdev * hwdev,struct hinic3_dev_cap * cap,const struct cfg_cmd_dev_cap * dev_cap,enum hinic3_func_type type)22 static void hinic3_parse_l2nic_res_cap(struct hinic3_hwdev *hwdev,
23 				       struct hinic3_dev_cap *cap,
24 				       const struct cfg_cmd_dev_cap *dev_cap,
25 				       enum hinic3_func_type type)
26 {
27 	struct hinic3_nic_service_cap *nic_svc_cap = &cap->nic_svc_cap;
28 
29 	nic_svc_cap->max_sqs = min(dev_cap->nic_max_sq_id + 1,
30 				   HINIC3_CFG_MAX_QP);
31 }
32 
hinic3_parse_dev_cap(struct hinic3_hwdev * hwdev,const struct cfg_cmd_dev_cap * dev_cap,enum hinic3_func_type type)33 static void hinic3_parse_dev_cap(struct hinic3_hwdev *hwdev,
34 				 const struct cfg_cmd_dev_cap *dev_cap,
35 				 enum hinic3_func_type type)
36 {
37 	struct hinic3_dev_cap *cap = &hwdev->cfg_mgmt->cap;
38 
39 	/* Public resource */
40 	hinic3_parse_pub_res_cap(hwdev, cap, dev_cap, type);
41 
42 	/* L2 NIC resource */
43 	if (hinic3_support_nic(hwdev))
44 		hinic3_parse_l2nic_res_cap(hwdev, cap, dev_cap, type);
45 }
46 
get_cap_from_fw(struct hinic3_hwdev * hwdev,enum hinic3_func_type type)47 static int get_cap_from_fw(struct hinic3_hwdev *hwdev,
48 			   enum hinic3_func_type type)
49 {
50 	struct mgmt_msg_params msg_params = {};
51 	struct cfg_cmd_dev_cap dev_cap = {};
52 	int err;
53 
54 	dev_cap.func_id = hinic3_global_func_id(hwdev);
55 
56 	mgmt_msg_params_init_default(&msg_params, &dev_cap, sizeof(dev_cap));
57 
58 	err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_CFGM,
59 				       CFG_CMD_GET_DEV_CAP, &msg_params);
60 	if (err || dev_cap.head.status) {
61 		dev_err(hwdev->dev,
62 			"Failed to get capability from FW, err: %d, status: 0x%x\n",
63 			err, dev_cap.head.status);
64 		return -EIO;
65 	}
66 
67 	hinic3_parse_dev_cap(hwdev, &dev_cap, type);
68 
69 	return 0;
70 }
71 
hinic3_init_irq_info(struct hinic3_hwdev * hwdev)72 static int hinic3_init_irq_info(struct hinic3_hwdev *hwdev)
73 {
74 	struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
75 	struct hinic3_hwif *hwif = hwdev->hwif;
76 	u16 intr_num = hwif->attr.num_irqs;
77 	struct hinic3_irq_info *irq_info;
78 	u16 intr_needed;
79 
80 	intr_needed = hwif->attr.msix_flex_en ? (hwif->attr.num_aeqs +
81 		      hwif->attr.num_ceqs + hwif->attr.num_sq) : intr_num;
82 	if (intr_needed > intr_num) {
83 		dev_warn(hwdev->dev, "Irq num cfg %d is less than the needed irq num %d msix_flex_en %d\n",
84 			 intr_num, intr_needed, hwdev->hwif->attr.msix_flex_en);
85 		intr_needed = intr_num;
86 	}
87 
88 	irq_info = &cfg_mgmt->irq_info;
89 	irq_info->irq = kzalloc_objs(struct hinic3_irq, intr_num);
90 	if (!irq_info->irq)
91 		return -ENOMEM;
92 
93 	irq_info->num_irq_hw = intr_needed;
94 	mutex_init(&irq_info->irq_mutex);
95 
96 	return 0;
97 }
98 
hinic3_init_irq_alloc_info(struct hinic3_hwdev * hwdev)99 static int hinic3_init_irq_alloc_info(struct hinic3_hwdev *hwdev)
100 {
101 	struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
102 	struct hinic3_irq *irq = cfg_mgmt->irq_info.irq;
103 	u16 nreq = cfg_mgmt->irq_info.num_irq_hw;
104 	struct pci_dev *pdev = hwdev->pdev;
105 	int actual_irq;
106 	u16 i;
107 
108 	actual_irq = pci_alloc_irq_vectors(pdev, 2, nreq, PCI_IRQ_MSIX);
109 	if (actual_irq < 0) {
110 		dev_err(hwdev->dev, "Alloc msix entries with threshold 2 failed. actual_irq: %d\n",
111 			actual_irq);
112 		return -ENOMEM;
113 	}
114 
115 	nreq = actual_irq;
116 	cfg_mgmt->irq_info.num_irq = nreq;
117 
118 	for (i = 0; i < nreq; ++i) {
119 		irq[i].msix_entry_idx = i;
120 		irq[i].irq_id = pci_irq_vector(pdev, i);
121 		irq[i].allocated = false;
122 	}
123 
124 	return 0;
125 }
126 
hinic3_init_cfg_mgmt(struct hinic3_hwdev * hwdev)127 int hinic3_init_cfg_mgmt(struct hinic3_hwdev *hwdev)
128 {
129 	struct hinic3_cfg_mgmt_info *cfg_mgmt;
130 	int err;
131 
132 	cfg_mgmt = kzalloc_obj(*cfg_mgmt);
133 	if (!cfg_mgmt)
134 		return -ENOMEM;
135 
136 	hwdev->cfg_mgmt = cfg_mgmt;
137 
138 	err = hinic3_init_irq_info(hwdev);
139 	if (err) {
140 		dev_err(hwdev->dev, "Failed to init hinic3_irq_mgmt_info, err: %d\n",
141 			err);
142 		goto err_free_cfg_mgmt;
143 	}
144 
145 	err = hinic3_init_irq_alloc_info(hwdev);
146 	if (err) {
147 		dev_err(hwdev->dev, "Failed to init hinic3_irq_info, err: %d\n",
148 			err);
149 		goto err_free_irq_info;
150 	}
151 
152 	return 0;
153 
154 err_free_irq_info:
155 	kfree(cfg_mgmt->irq_info.irq);
156 	cfg_mgmt->irq_info.irq = NULL;
157 err_free_cfg_mgmt:
158 	kfree(cfg_mgmt);
159 
160 	return err;
161 }
162 
hinic3_free_cfg_mgmt(struct hinic3_hwdev * hwdev)163 void hinic3_free_cfg_mgmt(struct hinic3_hwdev *hwdev)
164 {
165 	struct hinic3_cfg_mgmt_info *cfg_mgmt = hwdev->cfg_mgmt;
166 
167 	pci_free_irq_vectors(hwdev->pdev);
168 	kfree(cfg_mgmt->irq_info.irq);
169 	cfg_mgmt->irq_info.irq = NULL;
170 	kfree(cfg_mgmt);
171 }
172 
/* Reserve up to @num free IRQ entries and report them in @alloc_arr.
 *
 * Scans the bookkeeping table under irq_mutex, marking free entries as
 * allocated. The number actually reserved (possibly fewer than @num) is
 * written to @act_num.
 *
 * Return: 0 if at least one entry was reserved, -ENOMEM otherwise.
 */
int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num,
		      struct msix_entry *alloc_arr, u16 *act_num)
{
	struct hinic3_irq_info *irq_info = &hwdev->cfg_mgmt->irq_info;
	u16 idx, n_alloc = 0;

	mutex_lock(&irq_info->irq_mutex);
	for (idx = 0; idx < irq_info->num_irq; idx++) {
		struct hinic3_irq *entry = &irq_info->irq[idx];

		if (n_alloc == num)
			break;
		if (entry->allocated)
			continue;

		entry->allocated = true;
		alloc_arr[n_alloc].vector = entry->irq_id;
		alloc_arr[n_alloc].entry = entry->msix_entry_idx;
		n_alloc++;
	}
	mutex_unlock(&irq_info->irq_mutex);

	*act_num = n_alloc;

	return n_alloc ? 0 : -ENOMEM;
}
197 
/* Return the IRQ entry matching @irq_id to the free pool.
 *
 * A no-op if no entry with that id exists; only the first match is
 * released.
 */
void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id)
{
	struct hinic3_irq_info *irq_info = &hwdev->cfg_mgmt->irq_info;
	u16 idx;

	mutex_lock(&irq_info->irq_mutex);
	for (idx = 0; idx < irq_info->num_irq; idx++) {
		struct hinic3_irq *entry = &irq_info->irq[idx];

		if (entry->irq_id != irq_id)
			continue;

		entry->allocated = false;
		break;
	}
	mutex_unlock(&irq_info->irq_mutex);
}
215 
/* Fetch and cache device capabilities from firmware.
 * Only the VF function type is passed here; other types are not used by
 * this driver at present.
 */
int hinic3_init_capability(struct hinic3_hwdev *hwdev)
{
	return get_cap_from_fw(hwdev, HINIC3_FUNC_TYPE_VF);
}
220 
hinic3_support_nic(struct hinic3_hwdev * hwdev)221 bool hinic3_support_nic(struct hinic3_hwdev *hwdev)
222 {
223 	return hwdev->cfg_mgmt->cap.supp_svcs_bitmap &
224 	       BIT(HINIC3_SERVICE_T_NIC);
225 }
226 
hinic3_func_max_qnum(struct hinic3_hwdev * hwdev)227 u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev)
228 {
229 	return hwdev->cfg_mgmt->cap.nic_svc_cap.max_sqs;
230 }
231 
hinic3_physical_port_id(struct hinic3_hwdev * hwdev)232 u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev)
233 {
234 	return hwdev->cfg_mgmt->cap.port_id;
235 }
236