// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

/* Inter-Driver Communication */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

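/* Allocating xarray handing out unique IDs for auxiliary devices across all
 * ice PFs; the ID is stored in pf->aux_idx and used as adev->id when the
 * device is plugged in.
 */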
static DEFINE_XARRAY_ALLOC1(ice_aux_id);

/**
 * ice_get_auxiliary_drv - retrieve iidc_rdma_core_auxiliary_drv struct
 * @cdev: pointer to iidc_rdma_core_dev_info struct
 *
 * This function has to be called with a device_lock on the
 * cdev->adev.dev to avoid race conditions.
 *
 * Return: pointer to the matched auxiliary driver struct
 */
static struct iidc_rdma_core_auxiliary_drv *
ice_get_auxiliary_drv(struct iidc_rdma_core_dev_info *cdev)
{
	struct auxiliary_device *adev;

	adev = cdev->adev;
	if (!adev || !adev->dev.driver)
		return NULL;

	return container_of(adev->dev.driver,
			    struct iidc_rdma_core_auxiliary_drv, adrv.driver);
}

/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event)
{
	struct iidc_rdma_core_auxiliary_drv *iadrv;
	struct iidc_rdma_core_dev_info *cdev;

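	/* Must be called from task context: device_lock() below may sleep. */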
	if (WARN_ON_ONCE(!in_task()))
		return;

	cdev = pf->cdev_info;
	if (!cdev)
		return;

	mutex_lock(&pf->adev_mutex);
	if (!cdev->adev)
		goto finish;

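	/* Hold the device lock so the bound auxiliary driver cannot detach
	 * while its event handler is looked up and invoked.
	 */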
	device_lock(&cdev->adev->dev);
	iadrv = ice_get_auxiliary_drv(cdev);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(cdev, event);
	device_unlock(&cdev->adev->dev);
finish:
	mutex_unlock(&pf->adev_mutex);
}

/**
 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
 * @cdev: pointer to iidc_rdma_core_dev_info struct
 * @qset: Resource to be allocated
 *
 * Return: Zero on success or error code encountered
 */
int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
		      struct iidc_rdma_qset_params *qset)
{
	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_pf *pf;
	u32 qset_teid;
	u16 qs_handle;
	int status;
	int i;

	if (WARN_ON(!cdev || !qset))
		return -EINVAL;

	pf = pci_get_drvdata(cdev->pdev);
	dev = ice_pf_to_dev(pf);

	if (!ice_is_rdma_ena(pf))
		return -EINVAL;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "RDMA QSet invalid VSI\n");
		return -EINVAL;
	}

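	/* Request a single RDMA qset on the requested TC; every other TC
	 * stays at zero.
	 */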
	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;

	max_rdmaqs[qset->tc]++;
	qs_handle = qset->qs_handle;

	status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				  max_rdmaqs);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset config\n");
		return status;
	}

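	/* Enable the qset in the HW scheduler; the TEID of the new leaf node
	 * is returned through qset_teid and handed back to the caller for
	 * later teardown via ice_del_rdma_qset().
	 */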
	status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
				       &qs_handle, 1, &qset_teid);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset enable\n");
		return status;
	}
	qset->teid = qset_teid;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_add_rdma_qset);

/**
 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
 * @cdev: pointer to iidc_rdma_core_dev_info struct
 * @qset: Resource to be freed
 *
 * Return: Zero on success, error code on failure
 */
int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
		      struct iidc_rdma_qset_params *qset)
{
	struct ice_vsi *vsi;
	struct ice_pf *pf;
	u32 teid;
	u16 q_id;

	if (WARN_ON(!cdev || !qset))
		return -EINVAL;

	pf = pci_get_drvdata(cdev->pdev);
	vsi = ice_find_vsi(pf, qset->vport_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
		return -EINVAL;
	}

	q_id = qset->qs_handle;
	teid = qset->teid;

	return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);

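/* Illustrative sketch only (not part of this driver): an auxiliary RDMA
 * driver would typically pair the two exported calls above, keeping the TEID
 * filled in by the add call for teardown. The my_* names below are
 * placeholders owned by that driver, not definitions from this file.
 *
 *	struct iidc_rdma_qset_params qset = {};
 *	int err;
 *
 *	qset.qs_handle = my_qs_handle;	// handle chosen by the RDMA driver
 *	qset.tc = my_tc;		// traffic class to place the qset on
 *	qset.vport_id = my_vport_id;	// VSI number, reused on delete
 *	err = ice_add_rdma_qset(cdev, &qset);
 *	if (err)
 *		return err;
 *
 *	// ... use the qset; qset.teid now holds the scheduler node ID ...
 *
 *	err = ice_del_rdma_qset(cdev, &qset);
 */
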
/**
 * ice_rdma_request_reset - accept request from RDMA to perform a reset
 * @cdev: pointer to iidc_rdma_core_dev_info struct
 * @reset_type: type of reset
 *
 * Return: Zero on success, error code on failure
 */
int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev,
			   enum iidc_rdma_reset_type reset_type)
{
	enum ice_reset_req reset;
	struct ice_pf *pf;

	if (WARN_ON(!cdev))
		return -EINVAL;

	pf = pci_get_drvdata(cdev->pdev);

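	/* Translate the generic IIDC reset type into the ice-specific reset
	 * request before scheduling it.
	 */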
	switch (reset_type) {
	case IIDC_FUNC_RESET:
		reset = ICE_RESET_PFR;
		break;
	case IIDC_DEV_RESET:
		reset = ICE_RESET_CORER;
		break;
	default:
		return -EINVAL;
	}

	return ice_schedule_reset(pf, reset);
}
EXPORT_SYMBOL_GPL(ice_rdma_request_reset);

/**
 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
 * @cdev: pointer to iidc_rdma_core_dev_info struct
 * @vsi_id: VSI HW idx to update filter on
 * @enable: bool whether to enable or disable filters
 *
 * Return: Zero on success, error code on failure
 */
int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev,
			       u16 vsi_id, bool enable)
{
	struct ice_vsi *vsi;
	struct ice_pf *pf;
	int status;

	if (WARN_ON(!cdev))
		return -EINVAL;

	pf = pci_get_drvdata(cdev->pdev);
	vsi = ice_find_vsi(pf, vsi_id);
	if (!vsi)
		return -EINVAL;

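	/* Program the RDMA (PE) filter in HW and, on success, mirror the new
	 * state in the cached VSI queue option flags.
	 */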
	status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
			enable ? "en" : "dis");
	} else {
		if (enable)
			vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		else
			vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	}

	return status;
}
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);

/**
 * ice_alloc_rdma_qvector - alloc vector resources reserved for RDMA driver
 * @cdev: pointer to iidc_rdma_core_dev_info struct
 * @entry: MSI-X entry to be filled with the allocated vector
 *
 * Return: Zero on success, error code on failure
 */
int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
			   struct msix_entry *entry)
{
	struct msi_map map;
	struct ice_pf *pf;

	if (WARN_ON(!cdev))
		return -EINVAL;

	pf = pci_get_drvdata(cdev->pdev);
	map = ice_alloc_irq(pf, true);
	if (map.index < 0)
		return -ENOMEM;

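	/* Hand back both the MSI-X table index and the Linux IRQ number so
	 * the RDMA driver can request the interrupt.
	 */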
	entry->entry = map.index;
	entry->vector = map.virq;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);

/**
 * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
 * @cdev: pointer to iidc_rdma_core_dev_info struct
 * @entry: MSI-X entry to be removed
 */
void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
			   struct msix_entry *entry)
{
	struct msi_map map;
	struct ice_pf *pf;

	if (WARN_ON(!cdev || !entry))
		return;

	pf = pci_get_drvdata(cdev->pdev);

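	/* Rebuild the msi_map from the entry filled in by
	 * ice_alloc_rdma_qvector() and release the interrupt back to the PF.
	 */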
	map.index = entry->entry;
	map.virq = entry->vector;
	ice_free_irq(pf, map);
}
EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);

/**
 * ice_adev_release - function to be mapped to AUX dev's release op
 * @dev: pointer to device to free
 */
static void ice_adev_release(struct device *dev)
{
	struct iidc_rdma_core_auxiliary_dev *iadev;

	iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev,
			     adev.dev);
	kfree(iadev);
}

/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 *
 * Return: Zero on success, error code on failure
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_rdma_core_auxiliary_dev *iadev;
	struct iidc_rdma_core_dev_info *cdev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_rdma_ena(pf))
		return 0;

	cdev = pf->cdev_info;
	if (!cdev)
		return -ENODEV;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	iadev->cdev_info = cdev;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
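	/* The device name selects which auxiliary driver binds: RoCEv2 when
	 * the protocol bit is set, iWARP otherwise.
	 */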
	adev->name = cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2 ?
		     "roce" : "iwarp";

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ret;
	}

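	/* Publish the device under adev_mutex so ice_send_event_to_aux() only
	 * ever sees a fully registered auxiliary device.
	 */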
	mutex_lock(&pf->adev_mutex);
	cdev->adev = adev;
	mutex_unlock(&pf->adev_mutex);

	return 0;
}

/* ice_unplug_aux_dev - unregister and free AUX device
 * @pf: pointer to pf struct
 */
void ice_unplug_aux_dev(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	mutex_lock(&pf->adev_mutex);
	adev = pf->cdev_info->adev;
	pf->cdev_info->adev = NULL;
	mutex_unlock(&pf->adev_mutex);

	if (adev) {
		auxiliary_device_delete(adev);
		auxiliary_device_uninit(adev);
	}
}

/**
 * ice_init_rdma - initializes PF for RDMA use
 * @pf: ptr to ice_pf
 *
 * Return: Zero on success, error code on failure
 */
int ice_init_rdma(struct ice_pf *pf)
{
	struct iidc_rdma_priv_dev_info *privd;
	struct device *dev = &pf->pdev->dev;
	struct iidc_rdma_core_dev_info *cdev;
	int ret;

	if (!ice_is_rdma_ena(pf)) {
		dev_warn(dev, "RDMA is not supported on this device\n");
		return 0;
	}

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	pf->cdev_info = cdev;

	privd = kzalloc(sizeof(*privd), GFP_KERNEL);
	if (!privd) {
		ret = -ENOMEM;
		goto err_privd_alloc;
	}

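	/* Fill in the info handed to the RDMA auxiliary driver and reserve a
	 * unique auxiliary bus ID for this PF from the shared xarray.
	 */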
	privd->pf_id = pf->hw.pf_id;
	ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
		       GFP_KERNEL);
	if (ret) {
		dev_err(dev, "Failed to allocate device ID for AUX driver\n");
		ret = -ENOMEM;
		goto err_alloc_xa;
	}

	cdev->iidc_priv = privd;
	privd->netdev = pf->vsi[0]->netdev;

	privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr;
	cdev->pdev = pf->pdev;
	privd->vport_id = pf->vsi[0]->vsi_num;

	pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
	ice_setup_dcb_qos_info(pf, &privd->qos_info);
	ret = ice_plug_aux_dev(pf);
	if (ret)
		goto err_plug_aux_dev;
	return 0;

err_plug_aux_dev:
	pf->cdev_info->adev = NULL;
	xa_erase(&ice_aux_id, pf->aux_idx);
err_alloc_xa:
	kfree(privd);
err_privd_alloc:
	kfree(cdev);
	pf->cdev_info = NULL;

	return ret;
}

/**
 * ice_deinit_rdma - deinitialize RDMA on PF
 * @pf: ptr to ice_pf
 */
void ice_deinit_rdma(struct ice_pf *pf)
{
	if (!ice_is_rdma_ena(pf))
		return;

	ice_unplug_aux_dev(pf);
	xa_erase(&ice_aux_id, pf->aux_idx);
	kfree(pf->cdev_info->iidc_priv);
	kfree(pf->cdev_info);
	pf->cdev_info = NULL;
}