/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include <linux/auxiliary_bus.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"

static DEFINE_IDA(bnxt_aux_dev_ids);

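/* Fill the ULP's MSI-X entry table from the L2 driver's IRQ table, giving
 * the vector number, ring index and doorbell offset for each vector that
 * has been reserved for the ULP.
 */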
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
	struct bnxt_en_dev *edev = bp->edev;
	int num_msix, i;

	if (!edev->ulp_tbl->msix_requested) {
		netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
		return;
	}
	num_msix = edev->ulp_tbl->msix_requested;
	for (i = 0; i < num_msix; i++) {
		ent[i].vector = bp->irq_tbl[i].vector;
		ent[i].ring_idx = i;
		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
			ent[i].db_offset = bp->db_offset;
		else
			ent[i].db_offset = i * 0x80;
	}
}

int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
	if (bp->edev)
		return bp->edev->ulp_num_msix_vec;
	return 0;
}

void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
{
	if (bp->edev)
		bp->edev->ulp_num_msix_vec = num;
}

int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev))
		return bp->edev->ulp_num_msix_vec;
	return 0;
}

int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
	if (bp->edev)
		return bp->edev->ulp_num_ctxs;
	return 0;
}

void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
{
	if (bp->edev)
		bp->edev->ulp_num_ctxs = num_ulp_ctx;
}

int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
{
	if (bnxt_ulp_registered(bp->edev))
		return bp->edev->ulp_num_ctxs;
	return 0;
}

void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
{
	if (bp->edev) {
		bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
		/* Reserve one additional stat_ctx for PF0 (except
		 * on 1-port NICs) as it also creates one stat_ctx
		 * for PF1 in case of RoCE bonding.
		 */
		if (BNXT_PF(bp) && !bp->pf.port_id &&
		    bp->port_count > 1)
			bp->edev->ulp_num_ctxs++;
	}
}

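/* Called by the RoCE auxiliary driver to register its ops and handle with
 * the L2 driver.  Fails with -ENODEV if no IRQ table has been set up yet
 * and with -ENOMEM if there are not enough stat contexts left for RoCE.
 */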
int bnxt_register_dev(struct bnxt_en_dev *edev,
		      struct bnxt_ulp_ops *ulp_ops,
		      void *handle)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	unsigned int max_stat_ctxs;
	struct bnxt_ulp *ulp;
	int rc = 0;

	rtnl_lock();
	mutex_lock(&edev->en_dev_lock);
	if (!bp->irq_tbl) {
		rc = -ENODEV;
		goto exit;
	}
	max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
	if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
	    bp->cp_nr_rings == max_stat_ctxs) {
		rc = -ENOMEM;
		goto exit;
	}

	ulp = edev->ulp_tbl;
	ulp->handle = handle;
	rcu_assign_pointer(ulp->ulp_ops, ulp_ops);

	if (test_bit(BNXT_STATE_OPEN, &bp->state))
		bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);

	edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);

	bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
	edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
exit:
	mutex_unlock(&edev->en_dev_lock);
	rtnl_unlock();
	return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);

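/* Called by the RoCE auxiliary driver when it detaches.  Drops the MSI-X
 * request, re-registers with firmware without the ULP async events, clears
 * the ulp_ops pointer and waits up to one second for any remaining
 * references to the ULP to go away.
 */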
void bnxt_unregister_dev(struct bnxt_en_dev *edev)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;
	int i = 0;

	ulp = edev->ulp_tbl;
	rtnl_lock();
	mutex_lock(&edev->en_dev_lock);
	if (ulp->msix_requested)
		edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
	edev->ulp_tbl->msix_requested = 0;

	if (ulp->max_async_event_id)
		bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);

	RCU_INIT_POINTER(ulp->ulp_ops, NULL);
	synchronize_rcu();
	ulp->max_async_event_id = 0;
	ulp->async_events_bmap = NULL;
	while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
		msleep(100);
		i++;
	}
	mutex_unlock(&edev->en_dev_lock);
	rtnl_unlock();
	return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);

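/* Default number of MSI-X vectors to set aside for RoCE, capped according
 * to the function type (VF, NPAR PF or regular PF).
 */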
static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
	int roce_msix = BNXT_MAX_ROCE_MSIX;

	if (BNXT_VF(bp))
		roce_msix = BNXT_MAX_ROCE_MSIX_VF;
	else if (bp->port_partition_type)
		roce_msix = BNXT_MAX_ROCE_MSIX_NPAR_PF;

	/* NQ MSIX vectors should match the number of CPUs plus 1 more for
	 * the CREQ MSIX, up to the default.
	 */
	return min_t(int, roce_msix, num_online_cpus() + 1);
}

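/* Send an HWRM message built by the ULP driver to the firmware and copy the
 * response back, truncated to the caller's response buffer size.  Rejected
 * with -EBUSY while a firmware reset is in progress.
 */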
int bnxt_send_msg(struct bnxt_en_dev *edev,
		  struct bnxt_fw_msg *fw_msg)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct output *resp;
	struct input *req;
	u32 resp_len;
	int rc;

	if (bp->fw_reset_state)
		return -EBUSY;

	rc = hwrm_req_init(bp, req, 0 /* don't care */);
	if (rc)
		return rc;

	rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len);
	if (rc)
		goto drop_req;

	hwrm_req_timeout(bp, req, fw_msg->timeout);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	resp_len = le16_to_cpu(resp->resp_len);
	if (resp_len) {
		if (fw_msg->resp_max_len < resp_len)
			resp_len = fw_msg->resp_max_len;

		memcpy(fw_msg->resp, resp, resp_len);
	}
drop_req:
	hwrm_req_drop(bp, req);
	return rc;
}
EXPORT_SYMBOL(bnxt_send_msg);

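/* Suspend the RoCE auxiliary driver (if one is bound) ahead of a reset or
 * other disruptive event.  The current device state is saved in the edev
 * for the auxiliary driver's suspend handler.
 */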
void bnxt_ulp_stop(struct bnxt *bp)
{
	struct bnxt_aux_priv *aux_priv = bp->aux_priv;
	struct bnxt_en_dev *edev = bp->edev;

	if (!edev)
		return;

	mutex_lock(&edev->en_dev_lock);
	if (!bnxt_ulp_registered(edev)) {
		mutex_unlock(&edev->en_dev_lock);
		return;
	}

	edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
	if (aux_priv) {
		struct auxiliary_device *adev;

		adev = &aux_priv->aux_dev;
		if (adev->dev.driver) {
			const struct auxiliary_driver *adrv;
			pm_message_t pm = {};

			adrv = to_auxiliary_drv(adev->dev.driver);
			edev->en_state = bp->state;
			adrv->suspend(adev, pm);
		}
	}
	mutex_unlock(&edev->en_dev_lock);
}

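/* Resume the RoCE auxiliary driver after a reset.  If the reset failed
 * (err != 0), only the ULP_STOPPED flag is cleared; otherwise the MSI-X
 * entries are refreshed and the auxiliary driver's resume handler is called.
 */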
void bnxt_ulp_start(struct bnxt *bp, int err)
{
	struct bnxt_aux_priv *aux_priv = bp->aux_priv;
	struct bnxt_en_dev *edev = bp->edev;

	if (!edev)
		return;

	edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;

	if (err)
		return;

	mutex_lock(&edev->en_dev_lock);
	if (!bnxt_ulp_registered(edev)) {
		mutex_unlock(&edev->en_dev_lock);
		return;
	}

	if (edev->ulp_tbl->msix_requested)
		bnxt_fill_msix_vecs(bp, edev->msix_entries);

	if (aux_priv) {
		struct auxiliary_device *adev;

		adev = &aux_priv->aux_dev;
		if (adev->dev.driver) {
			const struct auxiliary_driver *adrv;

			adrv = to_auxiliary_drv(adev->dev.driver);
			edev->en_state = bp->state;
			adrv->resume(adev);
		}
	}
	mutex_unlock(&edev->en_dev_lock);
}

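/* Tell the ULP driver that its MSI-X vectors are about to be freed so it can
 * quiesce; the reset argument indicates whether a firmware reset was detected.
 */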
void bnxt_ulp_irq_stop(struct bnxt *bp)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	bool reset = false;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev)) {
		struct bnxt_ulp *ulp = edev->ulp_tbl;

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_stop)
			return;
		if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
			reset = true;
		ops->ulp_irq_stop(ulp->handle, reset);
	}
}

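/* Tell the ULP driver that MSI-X vectors are available again.  On success a
 * freshly filled MSI-X entry table is passed down; on error a NULL table
 * tells the ULP that the vectors could not be restored.
 */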
void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
{
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;

	if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
		return;

	if (bnxt_ulp_registered(bp->edev)) {
		struct bnxt_ulp *ulp = edev->ulp_tbl;
		struct bnxt_msix_entry *ent = NULL;

		if (!ulp->msix_requested)
			return;

		ops = rtnl_dereference(ulp->ulp_ops);
		if (!ops || !ops->ulp_irq_restart)
			return;

		if (!err) {
			ent = kcalloc(ulp->msix_requested, sizeof(*ent),
				      GFP_KERNEL);
			if (!ent)
				return;
			bnxt_fill_msix_vecs(bp, ent);
		}
		ops->ulp_irq_restart(ulp->handle, ent);
		kfree(ent);
	}
}

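/* Dispatch a firmware async event completion to the ULP driver if it has
 * registered a notifier and has asked for this event ID in its event bitmap.
 */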
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);
	struct bnxt_en_dev *edev = bp->edev;
	struct bnxt_ulp_ops *ops;
	struct bnxt_ulp *ulp;

	if (!bnxt_ulp_registered(edev))
		return;
	ulp = edev->ulp_tbl;

	rcu_read_lock();

	ops = rcu_dereference(ulp->ulp_ops);
	if (!ops || !ops->ulp_async_notifier)
		goto exit_unlock_rcu;
	if (!ulp->async_events_bmap || event_id > ulp->max_async_event_id)
		goto exit_unlock_rcu;

	/* Read max_async_event_id first before testing the bitmap. */
	smp_rmb();

	if (test_bit(event_id, ulp->async_events_bmap))
		ops->ulp_async_notifier(ulp->handle, cmpl);
exit_unlock_rcu:
	rcu_read_unlock();
}

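/* Called by the ULP driver to register the firmware async events it wants to
 * receive.  events_bmap is published before max_id (paired with the smp_rmb()
 * in bnxt_ulp_async_events()) and the new event list is passed to firmware.
 */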
void bnxt_register_async_events(struct bnxt_en_dev *edev,
				unsigned long *events_bmap, u16 max_id)
{
	struct net_device *dev = edev->net;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ulp *ulp;

	ulp = edev->ulp_tbl;
	ulp->async_events_bmap = events_bmap;
	/* Make sure bnxt_ulp_async_events() sees this order */
	smp_wmb();
	ulp->max_async_event_id = max_id;
	bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
}
EXPORT_SYMBOL(bnxt_register_async_events);

void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
{
	struct bnxt_aux_priv *aux_priv;
	struct auxiliary_device *adev;

	/* Skip if no auxiliary device init was done. */
	if (!bp->aux_priv)
		return;

	aux_priv = bp->aux_priv;
	adev = &aux_priv->aux_dev;
	auxiliary_device_uninit(adev);
}

static void bnxt_aux_dev_release(struct device *dev)
{
	struct bnxt_aux_priv *aux_priv =
		container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
	struct bnxt *bp = netdev_priv(aux_priv->edev->net);

	ida_free(&bnxt_aux_dev_ids, aux_priv->id);
	kfree(aux_priv->edev->ulp_tbl);
	bp->edev = NULL;
	kfree(aux_priv->edev);
	kfree(aux_priv);
	bp->aux_priv = NULL;
}

void bnxt_rdma_aux_device_del(struct bnxt *bp)
{
	if (!bp->edev)
		return;

	auxiliary_device_delete(&bp->aux_priv->aux_dev);
}

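/* Copy the L2 driver state that the RoCE driver needs (doorbell parameters,
 * chip number, capability flags, etc.) into the shared bnxt_en_dev.
 */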
static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
{
	edev->net = bp->dev;
	edev->pdev = bp->pdev;
	edev->l2_db_size = bp->db_size;
	edev->l2_db_size_nc = bp->db_size;
	edev->l2_db_offset = bp->db_offset;
	mutex_init(&edev->en_dev_lock);

	if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
		edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
	if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
		edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
	if (bp->flags & BNXT_FLAG_VF)
		edev->flags |= BNXT_EN_FLAG_VF;
	if (BNXT_ROCE_VF_RESC_CAP(bp))
		edev->flags |= BNXT_EN_FLAG_ROCE_VF_RES_MGMT;
	if (BNXT_SW_RES_LMT(bp))
		edev->flags |= BNXT_EN_FLAG_SW_RES_LMT;

	edev->chip_num = bp->chip_num;
	edev->hw_ring_stats_size = bp->hw_ring_stats_size;
	edev->pf_port_id = bp->pf.port_id;
	edev->en_state = bp->state;
	edev->bar0 = bp->bar0;
}

void bnxt_rdma_aux_device_add(struct bnxt *bp)
{
	struct auxiliary_device *aux_dev;
	int rc;

	if (!bp->edev)
		return;

	aux_dev = &bp->aux_priv->aux_dev;
	rc = auxiliary_device_add(aux_dev);
	if (rc) {
		netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
		auxiliary_device_uninit(aux_dev);
		bp->flags &= ~BNXT_FLAG_ROCE_CAP;
	}
}

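/* Allocate and initialize the "rdma" auxiliary device together with the
 * shared edev and ULP table.  Any failure clears BNXT_FLAG_ROCE_CAP so the
 * rest of the driver treats the device as having no RoCE support.
 */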
void bnxt_rdma_aux_device_init(struct bnxt *bp)
{
	struct auxiliary_device *aux_dev;
	struct bnxt_aux_priv *aux_priv;
	struct bnxt_en_dev *edev;
	struct bnxt_ulp *ulp;
	int rc;

	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
		return;

	aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
	if (!aux_priv)
		goto exit;

	aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
	if (aux_priv->id < 0) {
		netdev_warn(bp->dev,
			    "ida alloc failed for ROCE auxiliary device\n");
		kfree(aux_priv);
		goto exit;
	}

	aux_dev = &aux_priv->aux_dev;
	aux_dev->id = aux_priv->id;
	aux_dev->name = "rdma";
	aux_dev->dev.parent = &bp->pdev->dev;
	aux_dev->dev.release = bnxt_aux_dev_release;

	rc = auxiliary_device_init(aux_dev);
	if (rc) {
		ida_free(&bnxt_aux_dev_ids, aux_priv->id);
		kfree(aux_priv);
		goto exit;
	}
	bp->aux_priv = aux_priv;

	/* From this point, all cleanup will happen via the .release callback &
	 * any error unwinding will need to include a call to
	 * auxiliary_device_uninit.
	 */
	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		goto aux_dev_uninit;

	aux_priv->edev = edev;

	ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
	if (!ulp)
		goto aux_dev_uninit;

	edev->ulp_tbl = ulp;
	bp->edev = edev;
	bnxt_set_edev_info(edev, bp);
	bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp);

	return;

aux_dev_uninit:
	auxiliary_device_uninit(aux_dev);
exit:
	bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}