/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>
#include <net/netdev_lock.h>
#include <linux/bnxt/hsi.h>

#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"

static DEFINE_IDA(bnxt_aux_dev_ids);

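/* Fill @ent with the MSI-X vector, ring index and doorbell offset for
 * each vector reserved for the ULP.  On P5_PLUS chips every entry uses
 * the common bp->db_offset; on older chips each ring's doorbell page is
 * at a fixed 0x80 stride.
 */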
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
	struct bnxt_en_dev *edev = bp->edev;
	int num_msix, i;

	if (!edev->ulp_tbl->msix_requested) {
		netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
		return;
	}
	num_msix = edev->ulp_tbl->msix_requested;
	for (i = 0; i < num_msix; i++) {
		ent[i].vector = bp->irq_tbl[i].vector;
		ent[i].ring_idx = i;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			ent[i].db_offset = bp->db_offset;
		else
			ent[i].db_offset = i * 0x80;
	}
}

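/* Accessors for the MSI-X vector and stat context counts reserved for
 * the ULP.  The *_in_use variants report a non-zero count only while a
 * ULP is actually registered.
 */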
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
	if (bp->edev)
		return bp->edev->ulp_num_msix_vec;
	return 0;
}

void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
{
	if (bp->edev)
		bp->edev->ulp_num_msix_vec = num;
}

int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev))
		return bp->edev->ulp_num_msix_vec;
	return 0;
}

int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
	if (bp->edev)
		return bp->edev->ulp_num_ctxs;
	return 0;
}

void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
{
	if (bp->edev)
		bp->edev->ulp_num_ctxs = num_ulp_ctx;
}

int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev))
		return bp->edev->ulp_num_ctxs;
	return 0;
}

void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
{
	if (bp->edev) {
		bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
		/* Reserve one additional stat_ctx for PF0 (except
		 * on 1-port NICs) as it also creates one stat_ctx
		 * for PF1 in case of RoCE bonding.
		 */
		if (BNXT_PF(bp) && !bp->pf.port_id &&
		    bp->port_count > 1)
			bp->edev->ulp_num_ctxs++;

		/* Reserve one additional stat_ctx when the device is capable
		 * of supporting port mirroring on the RDMA device.
		 */
		if (BNXT_MIRROR_ON_ROCE_CAP(bp))
			bp->edev->ulp_num_ctxs++;
	}
}

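/* Register a ULP with the L2 driver.  Publishes ulp_ops under RCU,
 * reconfigures the default VNIC if the device is open, and fills
 * edev->msix_entries for the ULP.  Returns -ENODEV before IRQs are set
 * up, and -ENOMEM when no stat contexts can be spared for the ULP.
 */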
int bnxt_register_dev(struct bnxt_en_dev *edev,
		      struct bnxt_ulp_ops *ulp_ops,
		      void *handle)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	unsigned int max_stat_ctxs;
	struct bnxt_ulp *ulp;
	int rc = 0;

	netdev_lock(dev);
	mutex_lock(&edev->en_dev_lock);
	if (!bp->irq_tbl) {
		rc = -ENODEV;
		goto exit;
	}
	max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
	if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
	    bp->cp_nr_rings == max_stat_ctxs) {
		rc = -ENOMEM;
		goto exit;
	}

	ulp = edev->ulp_tbl;
	ulp->handle = handle;
	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

	if (test_bit(BNXT_STATE_OPEN, &bp->state))
		bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);

	edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);

	bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
exit:
	mutex_unlock(&edev->en_dev_lock);
	netdev_unlock(dev);
	return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
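
/* Illustrative only: a minimal sketch of how an RDMA ULP might hook in
 * from its auxiliary-device probe.  The my_* names below are
 * hypothetical placeholders, not part of this driver:
 *
 *	static struct bnxt_ulp_ops my_ulp_ops = {
 *		.ulp_irq_stop		= my_irq_stop,
 *		.ulp_irq_restart	= my_irq_restart,
 *		.ulp_async_notifier	= my_async_notifier,
 *	};
 *
 *	rc = bnxt_register_dev(edev, &my_ulp_ops, my_dev);
 *	...
 *	bnxt_unregister_dev(edev);
 */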

void bnxt_unregister_dev(struct bnxt_en_dev *edev)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	ulp = edev->ulp_tbl;
	netdev_lock(dev);
	mutex_lock(&edev->en_dev_lock);
	edev->ulp_tbl->msix_requested = 0;

	if (ulp->max_async_event_id)
		bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);

	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
	synchronize_rcu();
	ulp->max_async_event_id = 0;
	ulp->async_events_bmap = NULL;
	mutex_unlock(&edev->en_dev_lock);
	netdev_unlock(dev);
}
EXPORT_SYMBOL(bnxt_unregister_dev);

static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
	int roce_msix = BNXT_MAX_ROCE_MSIX;

	if (BNXT_VF(bp))
		roce_msix = BNXT_MAX_ROCE_MSIX_VF;
	else if (bp->port_partition_type)
		roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;

	/* NQ MSIX vectors should match the number of CPUs plus 1 more for
	 * the CREQ MSIX, up to the default.
	 */
	return min_t(int, roce_msix, num_online_cpus() + 1);
}

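/* Forward a fully formed HWRM message built by the ULP to the firmware
 * and copy the response back, truncated to fw_msg->resp_max_len.
 * Rejected with -EBUSY while a firmware reset is in progress.
 */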
int bnxt_send_msg(struct bnxt_en_dev *edev,
		  struct bnxt_fw_msg *fw_msg)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct output *resp;
	struct input *req;
	u32 resp_len;
	int rc;

	if (bp->fw_reset_state)
		return -EBUSY;

	/* The request type is overwritten by hwrm_req_replace() below. */
	rc = hwrm_req_init(bp, req, 0 /* don't care */);
	if (rc)
		return rc;

	rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
	if (rc)
		goto drop_req;

	hwrm_req_timeout(bp, req, fw_msg->timeout);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	resp_len = le16_to_cpu(resp->resp_len);
	if (resp_len) {
		if (fw_msg->resp_max_len < resp_len)
			resp_len = fw_msg->resp_max_len;

		memcpy(fw_msg->resp, resp, resp_len);
	}
drop_req:
	hwrm_req_drop(bp, req);
	return rc;
}
EXPORT_SYMBOL(bnxt_send_msg);

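/* Quiesce the RDMA auxiliary driver before a reset or other disruptive
 * operation by invoking its PM suspend hook.  Safe to call repeatedly;
 * BNXT_EN_FLAG_ULP_STOPPED guards against a double suspend.
 */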
void bnxt_ulp_stop(struct bnxt *bp)
{
	struct bnxt_aux_priv *aux_priv = bp->aux_priv;
	struct bnxt_en_dev *edev = bp->edev;

	if (!edev)
		return;

	mutex_lock(&edev->en_dev_lock);
	if (!bnxt_ulp_registered(edev) ||
	    (edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
		goto ulp_stop_exit;

	edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
	if (aux_priv) {
		struct auxiliary_device *adev;

		adev = &aux_priv->aux_dev;
		if (adev->dev.driver) {
			const struct auxiliary_driver *adrv;
			pm_message_t pm = {};

			adrv = to_auxiliary_drv(adev->dev.driver);
			edev->en_state = bp->state;
			adrv->suspend(adev, pm);
		}
	}
ulp_stop_exit:
	mutex_unlock(&edev->en_dev_lock);
}

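/* Resume a previously stopped RDMA auxiliary driver once the reset has
 * completed successfully (err == 0), refreshing its MSI-X table first
 * since vectors may have moved during the reset.
 */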
void bnxt_ulp_start(struct bnxt *bp, int err)
{
	struct bnxt_aux_priv *aux_priv = bp->aux_priv;
	struct bnxt_en_dev *edev = bp->edev;

	if (!edev || err)
		return;

	mutex_lock(&edev->en_dev_lock);
	if (!bnxt_ulp_registered(edev) ||
	    !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED))
		goto ulp_start_exit;

	if (edev->ulp_tbl->msix_requested)
		bnxt_fill_msix_vecs(bp, edev->msix_entries);

	if (aux_priv) {
		struct auxiliary_device *adev;

		adev = &aux_priv->aux_dev;
		if (adev->dev.driver) {
			const struct auxiliary_driver *adrv;

			adrv = to_auxiliary_drv(adev->dev.driver);
			edev->en_state = bp->state;
			adrv->resume(adev);
		}
	}
ulp_start_exit:
	edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
	mutex_unlock(&edev->en_dev_lock);
}

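/* Tell the ULP to stop using its MSI-X vectors, typically because the
 * L2 driver is about to reallocate IRQs.  The ULP is also told whether
 * a firmware reset has been detected.
 */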
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	bool reset = false;

	if (!edev)
		return;

	if (bnxt_ulp_registered(bp->edev)) {
		struct bnxt_ulp *ulp = edev->ulp_tbl;

		if (!ulp->msix_requested)
			return;

		ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
		if (!ops || !ops->ulp_irq_stop)
			return;
		if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
			reset = true;
		ops->ulp_irq_stop(ulp->handle, reset);
	}
}

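/* Hand the ULP a freshly filled MSI-X table after IRQs have been
 * reallocated.  On error (err != 0) the ULP is restarted with a NULL
 * table so it knows its vectors are gone.
 */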
void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev)
		return;

	if (bnxt_ulp_registered(bp->edev)) {
		struct bnxt_ulp *ulp = edev->ulp_tbl;
		struct bnxt_msix_entry *ent = NULL;

		if (!ulp->msix_requested)
			return;

		ops = netdev_lock_dereference(ulp->ulp_ops, bp->dev);
		if (!ops || !ops->ulp_irq_restart)
			return;

		if (!err) {
			ent = kzalloc_objs(*ent, ulp->msix_requested);
			if (!ent)
				return;
			bnxt_fill_msix_vecs(bp, ent);
		}
		ops->ulp_irq_restart(ulp->handle, ent);
		kfree(ent);
	}
}

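/* Dispatch a firmware async event completion to the ULP's notifier if
 * the ULP registered interest in this event_id.  The smp_rmb() pairs
 * with the smp_wmb() in bnxt_register_async_events() so that the bitmap
 * is not tested before max_async_event_id is current.
 */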
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	struct bnxt_ulp *ulp;

	if (!bnxt_ulp_registered(edev))
		return;
	ulp = edev->ulp_tbl;

	rcu_read_lock();

	ops = rcu_dereference(ulp->ulp_ops);
	if (!ops || !ops->ulp_async_notifier)
		goto exit_unlock_rcu;
	if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
		goto exit_unlock_rcu;

	/* Read max_async_event_id first before testing the bitmap. */
	smp_rmb();

	if (test_bit(event_id, ulp->async_events_bmap))
		ops->ulp_async_notifier(ulp->handle, cmpl);
exit_unlock_rcu:
	rcu_read_unlock();
}

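/* Record which async firmware events the ULP wants forwarded and
 * re-register with the firmware so those events are actually delivered.
 * The smp_wmb() orders the bitmap publication against the update of
 * max_async_event_id read by bnxt_ulp_async_events().
 */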
void bnxt_register_async_events(struct bnxt_en_dev *edev,
				unsigned long *events_bmap, u16 max_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	ulp = edev->ulp_tbl;
	ulp->async_events_bmap = events_bmap;
	/* Make sure bnxt_ulp_async_events() sees this order */
	smp_wmb();
	ulp->max_async_event_id = max_id;
	bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
}
EXPORT_SYMBOL(bnxt_register_async_events);

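/* Auxiliary device lifecycle: bnxt_rdma_aux_device_init() allocates the
 * "rdma" auxiliary device and its ULP table, bnxt_rdma_aux_device_add()
 * makes it visible on the auxiliary bus, and the _del()/_uninit() pair
 * tears it down again.  The final kfree()s happen in
 * bnxt_aux_dev_release() once the last device reference is dropped.
 */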
void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
{
	struct bnxt_aux_priv *aux_priv;
	struct auxiliary_device *adev;

	/* Skip if no auxiliary device init was done. */
	if (!bp->aux_priv)
		return;

	aux_priv = bp->aux_priv;
	adev = &aux_priv->aux_dev;
	auxiliary_device_uninit(adev);
}

static void bnxt_aux_dev_release(struct device *dev)
{
	struct bnxt_aux_priv *aux_priv =
		container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
	struct bnxt *bp = netdev_priv(aux_priv->edev->net);

	ida_free(&bnxt_aux_dev_ids, aux_priv->id);
	kfree(aux_priv->edev->ulp_tbl);
	bp->edev = NULL;
	kfree(aux_priv->edev);
	kfree(aux_priv);
	bp->aux_priv = NULL;
}

void bnxt_rdma_aux_device_del(struct bnxt *bp)
{
	if (!bp->edev)
		return;

	auxiliary_device_delete(&bp->aux_priv->aux_dev);
}

static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
{
	edev->net = bp->dev;
	edev->pdev = bp->pdev;
	edev->l2_db_size = bp->db_size;
	edev->l2_db_size_nc = bp->db_size;
	edev->l2_db_offset = bp->db_offset;
	mutex_init(&edev->en_dev_lock);

	if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
		edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
	if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
		edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
	if (bp->flags & BNXT_FLAG_VF)
		edev->flags |= BNXT_EN_FLAG_VF;
	if (BNXT_ROCE_VF_RESC_CAP(bp))
		edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
	if (BNXT_SW_RES_LMT(bp))
		edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;

	edev->chip_num = bp->chip_num;
	edev->hw_ring_stats_size = bp->hw_ring_stats_size;
	edev->pf_port_id = bp->pf.port_id;
	edev->en_state = bp->state;
	edev->bar0 = bp->bar0;
}

void bnxt_rdma_aux_device_add(struct bnxt *bp)
{
	struct auxiliary_device *aux_dev;
	int rc;

	if (!bp->edev)
		return;

	aux_dev = &bp->aux_priv->aux_dev;
	rc = auxiliary_device_add(aux_dev);
	if (rc) {
		netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
		auxiliary_device_uninit(aux_dev);
		bp->flags &= ~BNXT_FLAG_ROCE_CAP;
	}
}

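/* Allocate and initialize the RDMA auxiliary device.  On any failure
 * the RoCE capability flag is cleared so the rest of the driver treats
 * the device as RoCE-incapable.
 */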
void bnxt_rdma_aux_device_init(struct bnxt *bp)
{
	struct auxiliary_device *aux_dev;
	struct bnxt_aux_priv *aux_priv;
	struct bnxt_en_dev *edev;
	struct bnxt_ulp *ulp;
	int rc;

	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
		return;

	aux_priv = kzalloc_obj(*bp->aux_priv);
	if (!aux_priv)
		goto exit;

	aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
	if (aux_priv->id < 0) {
		netdev_warn(bp->dev,
			    "ida alloc failed for ROCE auxiliary device\n");
		kfree(aux_priv);
		goto exit;
	}

	aux_dev = &aux_priv->aux_dev;
	aux_dev->id = aux_priv->id;
	aux_dev->name = "rdma";
	aux_dev->dev.parent = &bp->pdev->dev;
	aux_dev->dev.release = bnxt_aux_dev_release;

	rc = auxiliary_device_init(aux_dev);
	if (rc) {
		ida_free(&bnxt_aux_dev_ids, aux_priv->id);
		kfree(aux_priv);
		goto exit;
	}
	bp->aux_priv = aux_priv;

	/* From this point, all cleanup will happen via the .release callback,
	 * and any error unwinding will need to include a call to
	 * auxiliary_device_uninit().
	 */
	edev = kzalloc_obj(*edev);
	if (!edev)
		goto aux_dev_uninit;

	aux_priv->edev = edev;

	ulp = kzalloc_obj(*ulp);
	if (!ulp)
		goto aux_dev_uninit;

	edev->ulp_tbl = ulp;
	bp->edev = edev;
	bnxt_set_edev_info(edev, bp);
	bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp);

	return;

aux_dev_uninit:
	auxiliary_device_uninit(aux_dev);
exit:
	bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}
533