1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2
3 #include <linux/bpf.h>
4 #include <linux/crash_dump.h>
5 #include <linux/etherdevice.h>
6 #include <linux/ethtool.h>
7 #include <linux/filter.h>
8 #include <linux/idr.h>
9 #include <linux/if_vlan.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/rtnetlink.h>
14 #include <linux/inetdevice.h>
15
16 #include "funeth.h"
17 #include "funeth_devlink.h"
18 #include "funeth_ktls.h"
19 #include "fun_port.h"
20 #include "fun_queue.h"
21 #include "funeth_txrx.h"
22
23 #define ADMIN_SQ_DEPTH 32
24 #define ADMIN_CQ_DEPTH 64
25 #define ADMIN_RQ_DEPTH 16
26
27 /* Default number of Tx/Rx queues. */
28 #define FUN_DFLT_QUEUES 16U
29
30 enum {
31 FUN_SERV_RES_CHANGE = FUN_SERV_FIRST_AVAIL,
32 FUN_SERV_DEL_PORTS,
33 };
34
35 static const struct pci_device_id funeth_id_table[] = {
36 { PCI_VDEVICE(FUNGIBLE, 0x0101) },
37 { PCI_VDEVICE(FUNGIBLE, 0x0181) },
38 { 0, }
39 };
40
41 /* Issue a port write admin command with @n key/value pairs. */
42 static int fun_port_write_cmds(struct funeth_priv *fp, unsigned int n,
43 const int *keys, const u64 *data)
44 {
45 unsigned int cmd_size, i;
46 union {
47 struct fun_admin_port_req req;
48 struct fun_admin_port_rsp rsp;
49 u8 v[ADMIN_SQE_SIZE];
50 } cmd;
51
52 cmd_size = offsetof(struct fun_admin_port_req, u.write.write48) +
53 n * sizeof(struct fun_admin_write48_req);
54 if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
55 return -EINVAL;
56
57 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
58 cmd_size);
59 cmd.req.u.write =
60 FUN_ADMIN_PORT_WRITE_REQ_INIT(FUN_ADMIN_SUBOP_WRITE, 0,
61 fp->netdev->dev_port);
62 for (i = 0; i < n; i++)
63 cmd.req.u.write.write48[i] =
64 FUN_ADMIN_WRITE48_REQ_INIT(keys[i], data[i]);
65
66 return fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
67 &cmd.rsp, cmd_size, 0);
68 }
69
70 int fun_port_write_cmd(struct funeth_priv *fp, int key, u64 data)
71 {
72 return fun_port_write_cmds(fp, 1, &key, &data);
73 }
74
75 /* Issue a port read admin command with @n key/value pairs. */
76 static int fun_port_read_cmds(struct funeth_priv *fp, unsigned int n,
77 const int *keys, u64 *data)
78 {
79 const struct fun_admin_read48_rsp *r48rsp;
80 unsigned int cmd_size, i;
81 int rc;
82 union {
83 struct fun_admin_port_req req;
84 struct fun_admin_port_rsp rsp;
85 u8 v[ADMIN_SQE_SIZE];
86 } cmd;
87
88 cmd_size = offsetof(struct fun_admin_port_req, u.read.read48) +
89 n * sizeof(struct fun_admin_read48_req);
90 if (cmd_size > sizeof(cmd) || cmd_size > ADMIN_RSP_MAX_LEN)
91 return -EINVAL;
92
93 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
94 cmd_size);
95 cmd.req.u.read =
96 FUN_ADMIN_PORT_READ_REQ_INIT(FUN_ADMIN_SUBOP_READ, 0,
97 fp->netdev->dev_port);
98 for (i = 0; i < n; i++)
99 cmd.req.u.read.read48[i] = FUN_ADMIN_READ48_REQ_INIT(keys[i]);
100
101 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
102 &cmd.rsp, cmd_size, 0);
103 if (rc)
104 return rc;
105
106 for (r48rsp = cmd.rsp.u.read.read48, i = 0; i < n; i++, r48rsp++) {
107 data[i] = FUN_ADMIN_READ48_RSP_DATA_G(r48rsp->key_to_data);
108 dev_dbg(fp->fdev->dev,
109 "port_read_rsp lport=%u (key_to_data=0x%llx) key=%d data:%lld retval:%lld",
110 fp->lport, r48rsp->key_to_data, keys[i], data[i],
111 FUN_ADMIN_READ48_RSP_RET_G(r48rsp->key_to_data));
112 }
113 return 0;
114 }
115
116 int fun_port_read_cmd(struct funeth_priv *fp, int key, u64 *data)
117 {
118 return fun_port_read_cmds(fp, 1, &key, data);
119 }
120
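/* Log the current link state of a port: speed, the active FEC mode, and the
 * pause settings in effect for each direction.
 */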
121 static void fun_report_link(struct net_device *netdev)
122 {
123 if (netif_carrier_ok(netdev)) {
124 const struct funeth_priv *fp = netdev_priv(netdev);
125 const char *fec = "", *pause = "";
126 int speed = fp->link_speed;
127 char unit = 'M';
128
129 if (fp->link_speed >= SPEED_1000) {
130 speed /= 1000;
131 unit = 'G';
132 }
133
134 if (fp->active_fec & FUN_PORT_FEC_RS)
135 fec = ", RS-FEC";
136 else if (fp->active_fec & FUN_PORT_FEC_FC)
137 fec = ", BASER-FEC";
138
139 if ((fp->active_fc & FUN_PORT_CAP_PAUSE_MASK) == FUN_PORT_CAP_PAUSE_MASK)
140 pause = ", Tx/Rx PAUSE";
141 else if (fp->active_fc & FUN_PORT_CAP_RX_PAUSE)
142 pause = ", Rx PAUSE";
143 else if (fp->active_fc & FUN_PORT_CAP_TX_PAUSE)
144 pause = ", Tx PAUSE";
145
146 netdev_info(netdev, "Link up at %d %cb/s full-duplex%s%s\n",
147 speed, unit, pause, fec);
148 } else {
149 netdev_info(netdev, "Link down\n");
150 }
151 }
152
153 static int fun_adi_write(struct fun_dev *fdev, enum fun_admin_adi_attr attr,
154 unsigned int adi_id, const struct fun_adi_param *param)
155 {
156 struct fun_admin_adi_req req = {
157 .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ADI,
158 sizeof(req)),
159 .u.write.subop = FUN_ADMIN_SUBOP_WRITE,
160 .u.write.attribute = attr,
161 .u.write.id = cpu_to_be32(adi_id),
162 .u.write.param = *param
163 };
164
165 return fun_submit_admin_sync_cmd(fdev, &req.common, NULL, 0, 0);
166 }
167
168 /* Configure RSS for the given port. @op determines whether a new RSS context
169 * is to be created or whether an existing one should be reconfigured. The
170 * remaining parameters specify the hashing algorithm, key, and indirection
171 * table.
172 *
173 * This initiates packet delivery to the Rx queues set in the indirection
174 * table.
175 */
176 int fun_config_rss(struct net_device *dev, int algo, const u8 *key,
177 const u32 *qtable, u8 op)
178 {
179 struct funeth_priv *fp = netdev_priv(dev);
180 unsigned int table_len = fp->indir_table_nentries;
181 unsigned int len = FUN_ETH_RSS_MAX_KEY_SIZE + sizeof(u32) * table_len;
182 struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
183 union {
184 struct {
185 struct fun_admin_rss_req req;
186 struct fun_dataop_gl gl;
187 };
188 struct fun_admin_generic_create_rsp rsp;
189 } cmd;
190 __be32 *indir_tab;
191 u16 flags;
192 int rc;
193
194 if (op != FUN_ADMIN_SUBOP_CREATE && fp->rss_hw_id == FUN_HCI_ID_INVALID)
195 return -EINVAL;
196
197 flags = op == FUN_ADMIN_SUBOP_CREATE ?
198 FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR : 0;
199 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_RSS,
200 sizeof(cmd));
201 cmd.req.u.create =
202 FUN_ADMIN_RSS_CREATE_REQ_INIT(op, flags, fp->rss_hw_id,
203 dev->dev_port, algo,
204 FUN_ETH_RSS_MAX_KEY_SIZE,
205 table_len, 0,
206 FUN_ETH_RSS_MAX_KEY_SIZE);
207 cmd.req.u.create.dataop = FUN_DATAOP_HDR_INIT(1, 0, 1, 0, len);
208 fun_dataop_gl_init(&cmd.gl, 0, 0, len, fp->rss_dma_addr);
209
210 /* write the key and indirection table into the RSS DMA area */
211 memcpy(fp->rss_cfg, key, FUN_ETH_RSS_MAX_KEY_SIZE);
212 indir_tab = fp->rss_cfg + FUN_ETH_RSS_MAX_KEY_SIZE;
213 for (rc = 0; rc < table_len; rc++)
214 *indir_tab++ = cpu_to_be32(rxqs[*qtable++]->hw_cqid);
215
216 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common,
217 &cmd.rsp, sizeof(cmd.rsp), 0);
218 if (!rc && op == FUN_ADMIN_SUBOP_CREATE)
219 fp->rss_hw_id = be32_to_cpu(cmd.rsp.id);
220 return rc;
221 }
222
223 /* Destroy the HW RSS context associated with the given port. This also stops
224 * all packet delivery to our Rx queues.
225 */
226 static void fun_destroy_rss(struct funeth_priv *fp)
227 {
228 if (fp->rss_hw_id != FUN_HCI_ID_INVALID) {
229 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_RSS, 0, fp->rss_hw_id);
230 fp->rss_hw_id = FUN_HCI_ID_INVALID;
231 }
232 }
233
234 static void fun_irq_aff_notify(struct irq_affinity_notify *notify,
235 const cpumask_t *mask)
236 {
237 struct fun_irq *p = container_of(notify, struct fun_irq, aff_notify);
238
239 cpumask_copy(&p->affinity_mask, mask);
240 }
241
242 static void fun_irq_aff_release(struct kref __always_unused *ref)
243 {
244 }
245
246 /* Allocate an IRQ structure, assign an MSI-X index and initial affinity to it,
247 * and add it to the IRQ XArray.
248 */
249 static struct fun_irq *fun_alloc_qirq(struct funeth_priv *fp, unsigned int idx,
250 int node, unsigned int xa_idx_offset)
251 {
252 struct fun_irq *irq;
253 int cpu, res;
254
255 cpu = cpumask_local_spread(idx, node);
256 node = cpu_to_mem(cpu);
257
258 irq = kzalloc_node(sizeof(*irq), GFP_KERNEL, node);
259 if (!irq)
260 return ERR_PTR(-ENOMEM);
261
262 res = fun_reserve_irqs(fp->fdev, 1, &irq->irq_idx);
263 if (res != 1)
264 goto free_irq;
265
266 res = xa_insert(&fp->irqs, idx + xa_idx_offset, irq, GFP_KERNEL);
267 if (res)
268 goto release_irq;
269
270 irq->irq = pci_irq_vector(fp->pdev, irq->irq_idx);
271 cpumask_set_cpu(cpu, &irq->affinity_mask);
272 irq->aff_notify.notify = fun_irq_aff_notify;
273 irq->aff_notify.release = fun_irq_aff_release;
274 irq->state = FUN_IRQ_INIT;
275 return irq;
276
277 release_irq:
278 fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
279 free_irq:
280 kfree(irq);
281 return ERR_PTR(res);
282 }
283
284 static void fun_free_qirq(struct funeth_priv *fp, struct fun_irq *irq)
285 {
286 netif_napi_del(&irq->napi);
287 fun_release_irqs(fp->fdev, 1, &irq->irq_idx);
288 kfree(irq);
289 }
290
291 /* Release the IRQs reserved for Tx/Rx queues that aren't being used. */
292 static void fun_prune_queue_irqs(struct net_device *dev)
293 {
294 struct funeth_priv *fp = netdev_priv(dev);
295 unsigned int nreleased = 0;
296 struct fun_irq *irq;
297 unsigned long idx;
298
299 xa_for_each(&fp->irqs, idx, irq) {
300 if (irq->txq || irq->rxq) /* skip those in use */
301 continue;
302
303 xa_erase(&fp->irqs, idx);
304 fun_free_qirq(fp, irq);
305 nreleased++;
306 if (idx < fp->rx_irq_ofst)
307 fp->num_tx_irqs--;
308 else
309 fp->num_rx_irqs--;
310 }
311 netif_info(fp, intr, dev, "Released %u queue IRQs\n", nreleased);
312 }
313
314 /* Reserve IRQs, one per queue, to accommodate the requested queue numbers @ntx
315 * and @nrx. IRQs are added incrementally to those we already have.
316 * We hold on to allocated IRQs until garbage collection of unused IRQs is
317 * separately requested.
318 */
319 static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx,
320 unsigned int nrx)
321 {
322 struct funeth_priv *fp = netdev_priv(dev);
323 int node = dev_to_node(&fp->pdev->dev);
324 struct fun_irq *irq;
325 unsigned int i;
326
327 for (i = fp->num_tx_irqs; i < ntx; i++) {
328 irq = fun_alloc_qirq(fp, i, node, 0);
329 if (IS_ERR(irq))
330 return PTR_ERR(irq);
331
332 fp->num_tx_irqs++;
333 netif_napi_add_tx(dev, &irq->napi, fun_txq_napi_poll);
334 }
335
336 for (i = fp->num_rx_irqs; i < nrx; i++) {
337 irq = fun_alloc_qirq(fp, i, node, fp->rx_irq_ofst);
338 if (IS_ERR(irq))
339 return PTR_ERR(irq);
340
341 fp->num_rx_irqs++;
342 netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll);
343 }
344
345 netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n",
346 ntx, nrx);
347 return 0;
348 }
349
350 static void free_txqs(struct funeth_txq **txqs, unsigned int nqs,
351 unsigned int start, int state)
352 {
353 unsigned int i;
354
355 for (i = start; i < nqs && txqs[i]; i++)
356 txqs[i] = funeth_txq_free(txqs[i], state);
357 }
358
359 static int alloc_txqs(struct net_device *dev, struct funeth_txq **txqs,
360 unsigned int nqs, unsigned int depth, unsigned int start,
361 int state)
362 {
363 struct funeth_priv *fp = netdev_priv(dev);
364 unsigned int i;
365 int err;
366
367 for (i = start; i < nqs; i++) {
368 err = funeth_txq_create(dev, i, depth, xa_load(&fp->irqs, i),
369 state, &txqs[i]);
370 if (err) {
371 free_txqs(txqs, nqs, start, FUN_QSTATE_DESTROYED);
372 return err;
373 }
374 }
375 return 0;
376 }
377
378 static void free_rxqs(struct funeth_rxq **rxqs, unsigned int nqs,
379 unsigned int start, int state)
380 {
381 unsigned int i;
382
383 for (i = start; i < nqs && rxqs[i]; i++)
384 rxqs[i] = funeth_rxq_free(rxqs[i], state);
385 }
386
387 static int alloc_rxqs(struct net_device *dev, struct funeth_rxq **rxqs,
388 unsigned int nqs, unsigned int ncqe, unsigned int nrqe,
389 unsigned int start, int state)
390 {
391 struct funeth_priv *fp = netdev_priv(dev);
392 unsigned int i;
393 int err;
394
395 for (i = start; i < nqs; i++) {
396 err = funeth_rxq_create(dev, i, ncqe, nrqe,
397 xa_load(&fp->irqs, i + fp->rx_irq_ofst),
398 state, &rxqs[i]);
399 if (err) {
400 free_rxqs(rxqs, nqs, start, FUN_QSTATE_DESTROYED);
401 return err;
402 }
403 }
404 return 0;
405 }
406
407 static void free_xdpqs(struct funeth_txq **xdpqs, unsigned int nqs,
408 unsigned int start, int state)
409 {
410 unsigned int i;
411
412 for (i = start; i < nqs && xdpqs[i]; i++)
413 xdpqs[i] = funeth_txq_free(xdpqs[i], state);
414
415 if (state == FUN_QSTATE_DESTROYED)
416 kfree(xdpqs);
417 }
418
419 static struct funeth_txq **alloc_xdpqs(struct net_device *dev, unsigned int nqs,
420 unsigned int depth, unsigned int start,
421 int state)
422 {
423 struct funeth_txq **xdpqs;
424 unsigned int i;
425 int err;
426
427 xdpqs = kzalloc_objs(*xdpqs, nqs);
428 if (!xdpqs)
429 return ERR_PTR(-ENOMEM);
430
431 for (i = start; i < nqs; i++) {
432 err = funeth_txq_create(dev, i, depth, NULL, state, &xdpqs[i]);
433 if (err) {
434 free_xdpqs(xdpqs, nqs, start, FUN_QSTATE_DESTROYED);
435 return ERR_PTR(err);
436 }
437 }
438 return xdpqs;
439 }
440
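/* Free the Rx/Tx/XDP queues described by @qset, or the currently installed
 * queues if @qset doesn't specify any. Installed queues are first unpublished
 * from the RCU-protected pointers, with a grace period before freeing.
 */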
441 static void fun_free_rings(struct net_device *netdev, struct fun_qset *qset)
442 {
443 struct funeth_priv *fp = netdev_priv(netdev);
444 struct funeth_txq **xdpqs = qset->xdpqs;
445 struct funeth_rxq **rxqs = qset->rxqs;
446
447 /* qset may not specify any queues to operate on. In that case the
448 * currently installed queues are implied.
449 */
450 if (!rxqs) {
451 rxqs = rtnl_dereference(fp->rxqs);
452 xdpqs = rtnl_dereference(fp->xdpqs);
453 qset->txqs = fp->txqs;
454 qset->nrxqs = netdev->real_num_rx_queues;
455 qset->ntxqs = netdev->real_num_tx_queues;
456 qset->nxdpqs = fp->num_xdpqs;
457 }
458 if (!rxqs)
459 return;
460
461 if (rxqs == rtnl_dereference(fp->rxqs)) {
462 rcu_assign_pointer(fp->rxqs, NULL);
463 rcu_assign_pointer(fp->xdpqs, NULL);
464 synchronize_net();
465 fp->txqs = NULL;
466 }
467
468 free_rxqs(rxqs, qset->nrxqs, qset->rxq_start, qset->state);
469 free_txqs(qset->txqs, qset->ntxqs, qset->txq_start, qset->state);
470 free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, qset->state);
471 if (qset->state == FUN_QSTATE_DESTROYED)
472 kfree(rxqs);
473
474 /* Tell the caller which queues were operated on. */
475 qset->rxqs = rxqs;
476 qset->xdpqs = xdpqs;
477 }
478
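/* Reserve the queue IRQs and allocate the Rx/Tx/XDP queues requested by
 * @qset. The Rx and Tx queue pointer arrays share one allocation, with the Tx
 * pointers stored immediately after the Rx ones.
 */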
479 static int fun_alloc_rings(struct net_device *netdev, struct fun_qset *qset)
480 {
481 struct funeth_txq **xdpqs = NULL, **txqs;
482 struct funeth_rxq **rxqs;
483 int err;
484
485 err = fun_alloc_queue_irqs(netdev, qset->ntxqs, qset->nrxqs);
486 if (err)
487 return err;
488
489 rxqs = kzalloc_objs(*rxqs, qset->ntxqs + qset->nrxqs);
490 if (!rxqs)
491 return -ENOMEM;
492
493 if (qset->nxdpqs) {
494 xdpqs = alloc_xdpqs(netdev, qset->nxdpqs, qset->sq_depth,
495 qset->xdpq_start, qset->state);
496 if (IS_ERR(xdpqs)) {
497 err = PTR_ERR(xdpqs);
498 goto free_qvec;
499 }
500 }
501
502 txqs = (struct funeth_txq **)&rxqs[qset->nrxqs];
503 err = alloc_txqs(netdev, txqs, qset->ntxqs, qset->sq_depth,
504 qset->txq_start, qset->state);
505 if (err)
506 goto free_xdpqs;
507
508 err = alloc_rxqs(netdev, rxqs, qset->nrxqs, qset->cq_depth,
509 qset->rq_depth, qset->rxq_start, qset->state);
510 if (err)
511 goto free_txqs;
512
513 qset->rxqs = rxqs;
514 qset->txqs = txqs;
515 qset->xdpqs = xdpqs;
516 return 0;
517
518 free_txqs:
519 free_txqs(txqs, qset->ntxqs, qset->txq_start, FUN_QSTATE_DESTROYED);
520 free_xdpqs:
521 free_xdpqs(xdpqs, qset->nxdpqs, qset->xdpq_start, FUN_QSTATE_DESTROYED);
522 free_qvec:
523 kfree(rxqs);
524 return err;
525 }
526
527 /* Take queues to the next level. Presently this means creating them on the
528 * device.
529 */
530 static int fun_advance_ring_state(struct net_device *dev, struct fun_qset *qset)
531 {
532 struct funeth_priv *fp = netdev_priv(dev);
533 int i, err;
534
535 for (i = 0; i < qset->nrxqs; i++) {
536 err = fun_rxq_create_dev(qset->rxqs[i],
537 xa_load(&fp->irqs,
538 i + fp->rx_irq_ofst));
539 if (err)
540 goto out;
541 }
542
543 for (i = 0; i < qset->ntxqs; i++) {
544 err = fun_txq_create_dev(qset->txqs[i], xa_load(&fp->irqs, i));
545 if (err)
546 goto out;
547 }
548
549 for (i = 0; i < qset->nxdpqs; i++) {
550 err = fun_txq_create_dev(qset->xdpqs[i], NULL);
551 if (err)
552 goto out;
553 }
554
555 return 0;
556
557 out:
558 fun_free_rings(dev, qset);
559 return err;
560 }
561
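/* Create the device port backing @netdev and record the logical port number
 * returned by the admin command. A no-op if the port already exists.
 */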
562 static int fun_port_create(struct net_device *netdev)
563 {
564 struct funeth_priv *fp = netdev_priv(netdev);
565 union {
566 struct fun_admin_port_req req;
567 struct fun_admin_port_rsp rsp;
568 } cmd;
569 int rc;
570
571 if (fp->lport != INVALID_LPORT)
572 return 0;
573
574 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_PORT,
575 sizeof(cmd.req));
576 cmd.req.u.create =
577 FUN_ADMIN_PORT_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE, 0,
578 netdev->dev_port);
579
580 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
581 sizeof(cmd.rsp), 0);
582
583 if (!rc)
584 fp->lport = be16_to_cpu(cmd.rsp.u.create.lport);
585 return rc;
586 }
587
588 static int fun_port_destroy(struct net_device *netdev)
589 {
590 struct funeth_priv *fp = netdev_priv(netdev);
591
592 if (fp->lport == INVALID_LPORT)
593 return 0;
594
595 fp->lport = INVALID_LPORT;
596 return fun_res_destroy(fp->fdev, FUN_ADMIN_OP_PORT, 0,
597 netdev->dev_port);
598 }
599
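/* Create an ETH (Tx flow) resource for the port. Returns the new resource id
 * on success or a negative error.
 */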
600 static int fun_eth_create(struct funeth_priv *fp)
601 {
602 union {
603 struct fun_admin_eth_req req;
604 struct fun_admin_generic_create_rsp rsp;
605 } cmd;
606 int rc;
607
608 cmd.req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_ETH,
609 sizeof(cmd.req));
610 cmd.req.u.create = FUN_ADMIN_ETH_CREATE_REQ_INIT(
611 FUN_ADMIN_SUBOP_CREATE,
612 FUN_ADMIN_RES_CREATE_FLAG_ALLOCATOR,
613 0, fp->netdev->dev_port);
614
615 rc = fun_submit_admin_sync_cmd(fp->fdev, &cmd.req.common, &cmd.rsp,
616 sizeof(cmd.rsp), 0);
617 return rc ? rc : be32_to_cpu(cmd.rsp.id);
618 }
619
620 static int fun_vi_create(struct funeth_priv *fp)
621 {
622 struct fun_admin_vi_req req = {
623 .common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_VI,
624 sizeof(req)),
625 .u.create = FUN_ADMIN_VI_CREATE_REQ_INIT(FUN_ADMIN_SUBOP_CREATE,
626 0,
627 fp->netdev->dev_port,
628 fp->netdev->dev_port)
629 };
630
631 return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
632 }
633
634 /* Helper to create an ETH flow and bind an SQ to it.
635 * Returns the ETH id (>= 0) on success or a negative error.
636 */
637 int fun_create_and_bind_tx(struct funeth_priv *fp, u32 sqid)
638 {
639 int rc, ethid;
640
641 ethid = fun_eth_create(fp);
642 if (ethid >= 0) {
643 rc = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_EPSQ, sqid,
644 FUN_ADMIN_BIND_TYPE_ETH, ethid);
645 if (rc) {
646 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, ethid);
647 ethid = rc;
648 }
649 }
650 return ethid;
651 }
652
653 static irqreturn_t fun_queue_irq_handler(int irq, void *data)
654 {
655 struct fun_irq *p = data;
656
657 if (p->rxq) {
658 prefetch(p->rxq->next_cqe_info);
659 p->rxq->irq_cnt++;
660 }
661 napi_schedule_irqoff(&p->napi);
662 return IRQ_HANDLED;
663 }
664
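/* Request and enable the IRQs of all queues in two passes: first request_irq()
 * for every queue IRQ still in INIT state, then set the affinity notifier and
 * hint and enable NAPI on the requested ones.
 */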
665 static int fun_enable_irqs(struct net_device *dev)
666 {
667 struct funeth_priv *fp = netdev_priv(dev);
668 unsigned long idx, last;
669 unsigned int qidx;
670 struct fun_irq *p;
671 const char *qtype;
672 int err;
673
674 xa_for_each(&fp->irqs, idx, p) {
675 if (p->txq) {
676 qtype = "tx";
677 qidx = p->txq->qidx;
678 } else if (p->rxq) {
679 qtype = "rx";
680 qidx = p->rxq->qidx;
681 } else {
682 continue;
683 }
684
685 if (p->state != FUN_IRQ_INIT)
686 continue;
687
688 snprintf(p->name, sizeof(p->name) - 1, "%s-%s-%u", dev->name,
689 qtype, qidx);
690 err = request_irq(p->irq, fun_queue_irq_handler, 0, p->name, p);
691 if (err) {
692 netdev_err(dev, "Failed to allocate IRQ %u, err %d\n",
693 p->irq, err);
694 goto unroll;
695 }
696 p->state = FUN_IRQ_REQUESTED;
697 }
698
699 xa_for_each(&fp->irqs, idx, p) {
700 if (p->state != FUN_IRQ_REQUESTED)
701 continue;
702 irq_set_affinity_notifier(p->irq, &p->aff_notify);
703 irq_set_affinity_and_hint(p->irq, &p->affinity_mask);
704 napi_enable(&p->napi);
705 p->state = FUN_IRQ_ENABLED;
706 }
707
708 return 0;
709
710 unroll:
711 last = idx - 1;
712 xa_for_each_range(&fp->irqs, idx, p, 0, last)
713 if (p->state == FUN_IRQ_REQUESTED) {
714 free_irq(p->irq, p);
715 p->state = FUN_IRQ_INIT;
716 }
717
718 return err;
719 }
720
721 static void fun_disable_one_irq(struct fun_irq *irq)
722 {
723 napi_disable(&irq->napi);
724 irq_set_affinity_notifier(irq->irq, NULL);
725 irq_update_affinity_hint(irq->irq, NULL);
726 free_irq(irq->irq, irq);
727 irq->state = FUN_IRQ_INIT;
728 }
729
730 static void fun_disable_irqs(struct net_device *dev)
731 {
732 struct funeth_priv *fp = netdev_priv(dev);
733 struct fun_irq *p;
734 unsigned long idx;
735
736 xa_for_each(&fp->irqs, idx, p)
737 if (p->state == FUN_IRQ_ENABLED)
738 fun_disable_one_irq(p);
739 }
740
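/* Tear down the data path: disable the port, stop Tx and carrier, destroy the
 * RSS context and the VI, release the queue IRQs, and free the rings in @qset.
 */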
741 static void fun_down(struct net_device *dev, struct fun_qset *qset)
742 {
743 struct funeth_priv *fp = netdev_priv(dev);
744
745 /* If we don't have queues the data path is already down.
746 * Note netif_running(dev) may be true.
747 */
748 if (!rcu_access_pointer(fp->rxqs))
749 return;
750
751 /* It is also down if the queues aren't on the device. */
752 if (fp->txqs[0]->init_state >= FUN_QSTATE_INIT_FULL) {
753 netif_info(fp, ifdown, dev,
754 "Tearing down data path on device\n");
755 fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_DISABLE, 0);
756
757 netif_carrier_off(dev);
758 netif_tx_disable(dev);
759
760 fun_destroy_rss(fp);
761 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
762 fun_disable_irqs(dev);
763 }
764
765 fun_free_rings(dev, qset);
766 }
767
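/* Bring up the data path with the queues in @qset: create them on the device
 * if needed, create the VI, install the queues, enable their IRQs, set up RSS
 * (or bind the single Rx CQ to the VI), point the device at the stats DMA
 * area, and enable the port.
 */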
768 static int fun_up(struct net_device *dev, struct fun_qset *qset)
769 {
770 static const int port_keys[] = {
771 FUN_ADMIN_PORT_KEY_STATS_DMA_LOW,
772 FUN_ADMIN_PORT_KEY_STATS_DMA_HIGH,
773 FUN_ADMIN_PORT_KEY_ENABLE
774 };
775
776 struct funeth_priv *fp = netdev_priv(dev);
777 u64 vals[] = {
778 lower_32_bits(fp->stats_dma_addr),
779 upper_32_bits(fp->stats_dma_addr),
780 FUN_PORT_FLAG_ENABLE_NOTIFY
781 };
782 int err;
783
784 netif_info(fp, ifup, dev, "Setting up data path on device\n");
785
786 if (qset->rxqs[0]->init_state < FUN_QSTATE_INIT_FULL) {
787 err = fun_advance_ring_state(dev, qset);
788 if (err)
789 return err;
790 }
791
792 err = fun_vi_create(fp);
793 if (err)
794 goto free_queues;
795
796 fp->txqs = qset->txqs;
797 rcu_assign_pointer(fp->rxqs, qset->rxqs);
798 rcu_assign_pointer(fp->xdpqs, qset->xdpqs);
799
800 err = fun_enable_irqs(dev);
801 if (err)
802 goto destroy_vi;
803
804 if (fp->rss_cfg) {
805 err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
806 fp->indir_table, FUN_ADMIN_SUBOP_CREATE);
807 } else {
808 /* The non-RSS case has only 1 queue. */
809 err = fun_bind(fp->fdev, FUN_ADMIN_BIND_TYPE_VI, dev->dev_port,
810 FUN_ADMIN_BIND_TYPE_EPCQ,
811 qset->rxqs[0]->hw_cqid);
812 }
813 if (err)
814 goto disable_irqs;
815
816 err = fun_port_write_cmds(fp, 3, port_keys, vals);
817 if (err)
818 goto free_rss;
819
820 netif_tx_start_all_queues(dev);
821 return 0;
822
823 free_rss:
824 fun_destroy_rss(fp);
825 disable_irqs:
826 fun_disable_irqs(dev);
827 destroy_vi:
828 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_VI, 0, dev->dev_port);
829 free_queues:
830 fun_free_rings(dev, qset);
831 return err;
832 }
833
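/* ndo_open handler: allocate fully initialized rings for the current queue
 * counts and bring up the data path.
 */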
834 static int funeth_open(struct net_device *netdev)
835 {
836 struct funeth_priv *fp = netdev_priv(netdev);
837 struct fun_qset qset = {
838 .nrxqs = netdev->real_num_rx_queues,
839 .ntxqs = netdev->real_num_tx_queues,
840 .nxdpqs = fp->num_xdpqs,
841 .cq_depth = fp->cq_depth,
842 .rq_depth = fp->rq_depth,
843 .sq_depth = fp->sq_depth,
844 .state = FUN_QSTATE_INIT_FULL,
845 };
846 int rc;
847
848 rc = fun_alloc_rings(netdev, &qset);
849 if (rc)
850 return rc;
851
852 rc = fun_up(netdev, &qset);
853 if (rc) {
854 qset.state = FUN_QSTATE_DESTROYED;
855 fun_free_rings(netdev, &qset);
856 }
857
858 return rc;
859 }
860
861 static int funeth_close(struct net_device *netdev)
862 {
863 struct fun_qset qset = { .state = FUN_QSTATE_DESTROYED };
864
865 fun_down(netdev, &qset);
866 return 0;
867 }
868
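/* ndo_get_stats64 handler: start from the totals kept in the private struct
 * and add the per-queue Tx/Rx/XDP counters of the live queues under RCU.
 */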
869 static void fun_get_stats64(struct net_device *netdev,
870 struct rtnl_link_stats64 *stats)
871 {
872 struct funeth_priv *fp = netdev_priv(netdev);
873 struct funeth_txq **xdpqs;
874 struct funeth_rxq **rxqs;
875 unsigned int i, start;
876
877 stats->tx_packets = fp->tx_packets;
878 stats->tx_bytes = fp->tx_bytes;
879 stats->tx_dropped = fp->tx_dropped;
880
881 stats->rx_packets = fp->rx_packets;
882 stats->rx_bytes = fp->rx_bytes;
883 stats->rx_dropped = fp->rx_dropped;
884
885 rcu_read_lock();
886 rxqs = rcu_dereference(fp->rxqs);
887 if (!rxqs)
888 goto unlock;
889
890 for (i = 0; i < netdev->real_num_tx_queues; i++) {
891 struct funeth_txq_stats txs;
892
893 FUN_QSTAT_READ(fp->txqs[i], start, txs);
894 stats->tx_packets += txs.tx_pkts;
895 stats->tx_bytes += txs.tx_bytes;
896 stats->tx_dropped += txs.tx_map_err;
897 }
898
899 for (i = 0; i < netdev->real_num_rx_queues; i++) {
900 struct funeth_rxq_stats rxs;
901
902 FUN_QSTAT_READ(rxqs[i], start, rxs);
903 stats->rx_packets += rxs.rx_pkts;
904 stats->rx_bytes += rxs.rx_bytes;
905 stats->rx_dropped += rxs.rx_map_err + rxs.rx_mem_drops;
906 }
907
908 xdpqs = rcu_dereference(fp->xdpqs);
909 if (!xdpqs)
910 goto unlock;
911
912 for (i = 0; i < fp->num_xdpqs; i++) {
913 struct funeth_txq_stats txs;
914
915 FUN_QSTAT_READ(xdpqs[i], start, txs);
916 stats->tx_packets += txs.tx_pkts;
917 stats->tx_bytes += txs.tx_bytes;
918 }
919 unlock:
920 rcu_read_unlock();
921 }
922
923 static int fun_change_mtu(struct net_device *netdev, int new_mtu)
924 {
925 struct funeth_priv *fp = netdev_priv(netdev);
926 int rc;
927
928 rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu);
929 if (!rc)
930 WRITE_ONCE(netdev->mtu, new_mtu);
931 return rc;
932 }
933
934 static int fun_set_macaddr(struct net_device *netdev, void *addr)
935 {
936 struct funeth_priv *fp = netdev_priv(netdev);
937 struct sockaddr *saddr = addr;
938 int rc;
939
940 if (!is_valid_ether_addr(saddr->sa_data))
941 return -EADDRNOTAVAIL;
942
943 if (ether_addr_equal(netdev->dev_addr, saddr->sa_data))
944 return 0;
945
946 rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
947 ether_addr_to_u64(saddr->sa_data));
948 if (!rc)
949 eth_hw_addr_set(netdev, saddr->sa_data);
950 return rc;
951 }
952
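/* Read the port's MAC address, capabilities, advertised modes, and MTU from
 * the device, plus the lane attributes for physical ports. A random MAC is
 * generated and written back if the device doesn't provide one.
 */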
953 static int fun_get_port_attributes(struct net_device *netdev)
954 {
955 static const int keys[] = {
956 FUN_ADMIN_PORT_KEY_MACADDR, FUN_ADMIN_PORT_KEY_CAPABILITIES,
957 FUN_ADMIN_PORT_KEY_ADVERT, FUN_ADMIN_PORT_KEY_MTU
958 };
959 static const int phys_keys[] = {
960 FUN_ADMIN_PORT_KEY_LANE_ATTRS,
961 };
962
963 struct funeth_priv *fp = netdev_priv(netdev);
964 u64 data[ARRAY_SIZE(keys)];
965 u8 mac[ETH_ALEN];
966 int i, rc;
967
968 rc = fun_port_read_cmds(fp, ARRAY_SIZE(keys), keys, data);
969 if (rc)
970 return rc;
971
972 for (i = 0; i < ARRAY_SIZE(keys); i++) {
973 switch (keys[i]) {
974 case FUN_ADMIN_PORT_KEY_MACADDR:
975 u64_to_ether_addr(data[i], mac);
976 if (is_zero_ether_addr(mac)) {
977 eth_hw_addr_random(netdev);
978 } else if (is_valid_ether_addr(mac)) {
979 eth_hw_addr_set(netdev, mac);
980 } else {
981 netdev_err(netdev,
982 "device provided a bad MAC address %pM\n",
983 mac);
984 return -EINVAL;
985 }
986 break;
987
988 case FUN_ADMIN_PORT_KEY_CAPABILITIES:
989 fp->port_caps = data[i];
990 break;
991
992 case FUN_ADMIN_PORT_KEY_ADVERT:
993 fp->advertising = data[i];
994 break;
995
996 case FUN_ADMIN_PORT_KEY_MTU:
997 netdev->mtu = data[i];
998 break;
999 }
1000 }
1001
1002 if (!(fp->port_caps & FUN_PORT_CAP_VPORT)) {
1003 rc = fun_port_read_cmds(fp, ARRAY_SIZE(phys_keys), phys_keys,
1004 data);
1005 if (rc)
1006 return rc;
1007
1008 fp->lane_attrs = data[0];
1009 }
1010
1011 if (netdev->addr_assign_type == NET_ADDR_RANDOM)
1012 return fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MACADDR,
1013 ether_addr_to_u64(netdev->dev_addr));
1014 return 0;
1015 }
1016
1017 static int fun_hwtstamp_get(struct net_device *dev,
1018 struct kernel_hwtstamp_config *config)
1019 {
1020 const struct funeth_priv *fp = netdev_priv(dev);
1021
1022 *config = fp->hwtstamp_cfg;
1023 return 0;
1024 }
1025
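/* ndo_hwtstamp_set handler: only Rx timestamping of all packets is supported,
 * and Tx timestamping is reported as off.
 */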
1026 static int fun_hwtstamp_set(struct net_device *dev,
1027 struct kernel_hwtstamp_config *config,
1028 struct netlink_ext_ack *extack)
1029 {
1030 struct funeth_priv *fp = netdev_priv(dev);
1031
1032 /* no TX HW timestamps */
1033 config->tx_type = HWTSTAMP_TX_OFF;
1034
1035 switch (config->rx_filter) {
1036 case HWTSTAMP_FILTER_NONE:
1037 break;
1038 case HWTSTAMP_FILTER_ALL:
1039 case HWTSTAMP_FILTER_SOME:
1040 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1041 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1042 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1043 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1044 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1045 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1046 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1047 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1048 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1049 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1050 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1051 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1052 case HWTSTAMP_FILTER_NTP_ALL:
1053 config->rx_filter = HWTSTAMP_FILTER_ALL;
1054 break;
1055 default:
1056 return -ERANGE;
1057 }
1058
1059 fp->hwtstamp_cfg = *config;
1060 return 0;
1061 }
1062
1063 /* Prepare the queues for XDP. */
1064 static int fun_enter_xdp(struct net_device *dev, struct bpf_prog *prog)
1065 {
1066 struct funeth_priv *fp = netdev_priv(dev);
1067 unsigned int i, nqs = num_online_cpus();
1068 struct funeth_txq **xdpqs;
1069 struct funeth_rxq **rxqs;
1070 int err;
1071
1072 xdpqs = alloc_xdpqs(dev, nqs, fp->sq_depth, 0, FUN_QSTATE_INIT_FULL);
1073 if (IS_ERR(xdpqs))
1074 return PTR_ERR(xdpqs);
1075
1076 rxqs = rtnl_dereference(fp->rxqs);
1077 for (i = 0; i < dev->real_num_rx_queues; i++) {
1078 err = fun_rxq_set_bpf(rxqs[i], prog);
1079 if (err)
1080 goto out;
1081 }
1082
1083 fp->num_xdpqs = nqs;
1084 rcu_assign_pointer(fp->xdpqs, xdpqs);
1085 return 0;
1086 out:
1087 while (i--)
1088 fun_rxq_set_bpf(rxqs[i], NULL);
1089
1090 free_xdpqs(xdpqs, nqs, 0, FUN_QSTATE_DESTROYED);
1091 return err;
1092 }
1093
1094 /* Set the queues for non-XDP operation. */
1095 static void fun_end_xdp(struct net_device *dev)
1096 {
1097 struct funeth_priv *fp = netdev_priv(dev);
1098 struct funeth_txq **xdpqs;
1099 struct funeth_rxq **rxqs;
1100 unsigned int i;
1101
1102 xdpqs = rtnl_dereference(fp->xdpqs);
1103 rcu_assign_pointer(fp->xdpqs, NULL);
1104 synchronize_net();
1105 /* at this point both Rx and Tx XDP processing has ended */
1106
1107 free_xdpqs(xdpqs, fp->num_xdpqs, 0, FUN_QSTATE_DESTROYED);
1108 fp->num_xdpqs = 0;
1109
1110 rxqs = rtnl_dereference(fp->rxqs);
1111 for (i = 0; i < dev->real_num_rx_queues; i++)
1112 fun_rxq_set_bpf(rxqs[i], NULL);
1113 }
1114
1115 #define XDP_MAX_MTU \
1116 (PAGE_SIZE - FUN_XDP_HEADROOM - VLAN_ETH_HLEN - FUN_RX_TAILROOM)
1117
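/* Install or remove an XDP program. Switching into or out of XDP on a running
 * device also sets up or tears down the XDP Tx queues.
 */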
1118 static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
1119 {
1120 struct bpf_prog *old_prog, *prog = xdp->prog;
1121 struct funeth_priv *fp = netdev_priv(dev);
1122 int i, err;
1123
1124 /* XDP uses at most one buffer */
1125 if (prog && dev->mtu > XDP_MAX_MTU) {
1126 netdev_err(dev, "device MTU %u too large for XDP\n", dev->mtu);
1127 NL_SET_ERR_MSG_MOD(xdp->extack,
1128 "Device MTU too large for XDP");
1129 return -EINVAL;
1130 }
1131
1132 if (!netif_running(dev)) {
1133 fp->num_xdpqs = prog ? num_online_cpus() : 0;
1134 } else if (prog && !fp->xdp_prog) {
1135 err = fun_enter_xdp(dev, prog);
1136 if (err) {
1137 NL_SET_ERR_MSG_MOD(xdp->extack,
1138 "Failed to set queues for XDP.");
1139 return err;
1140 }
1141 } else if (!prog && fp->xdp_prog) {
1142 fun_end_xdp(dev);
1143 } else {
1144 struct funeth_rxq **rxqs = rtnl_dereference(fp->rxqs);
1145
1146 for (i = 0; i < dev->real_num_rx_queues; i++)
1147 WRITE_ONCE(rxqs[i]->xdp_prog, prog);
1148 }
1149
1150 if (prog)
1151 xdp_features_set_redirect_target(dev, true);
1152 else
1153 xdp_features_clear_redirect_target(dev);
1154
1155 dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU;
1156 old_prog = xchg(&fp->xdp_prog, prog);
1157 if (old_prog)
1158 bpf_prog_put(old_prog);
1159
1160 return 0;
1161 }
1162
1163 static int fun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1164 {
1165 switch (xdp->command) {
1166 case XDP_SETUP_PROG:
1167 return fun_xdp_setup(dev, xdp);
1168 default:
1169 return -EINVAL;
1170 }
1171 }
1172
1173 static int fun_init_vports(struct fun_ethdev *ed, unsigned int n)
1174 {
1175 if (ed->num_vports)
1176 return -EINVAL;
1177
1178 ed->vport_info = kvzalloc_objs(*ed->vport_info, n);
1179 if (!ed->vport_info)
1180 return -ENOMEM;
1181 ed->num_vports = n;
1182 return 0;
1183 }
1184
1185 static void fun_free_vports(struct fun_ethdev *ed)
1186 {
1187 kvfree(ed->vport_info);
1188 ed->vport_info = NULL;
1189 ed->num_vports = 0;
1190 }
1191
1192 static struct fun_vport_info *fun_get_vport(struct fun_ethdev *ed,
1193 unsigned int vport)
1194 {
1195 if (!ed->vport_info || vport >= ed->num_vports)
1196 return NULL;
1197
1198 return ed->vport_info + vport;
1199 }
1200
1201 static int fun_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
1202 {
1203 struct funeth_priv *fp = netdev_priv(dev);
1204 struct fun_adi_param mac_param = {};
1205 struct fun_dev *fdev = fp->fdev;
1206 struct fun_ethdev *ed = to_fun_ethdev(fdev);
1207 struct fun_vport_info *vi;
1208 int rc = -EINVAL;
1209
1210 if (is_multicast_ether_addr(mac))
1211 return -EINVAL;
1212
1213 mutex_lock(&ed->state_mutex);
1214 vi = fun_get_vport(ed, vf);
1215 if (!vi)
1216 goto unlock;
1217
1218 mac_param.u.mac = FUN_ADI_MAC_INIT(ether_addr_to_u64(mac));
1219 rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_MACADDR, vf + 1,
1220 &mac_param);
1221 if (!rc)
1222 ether_addr_copy(vi->mac, mac);
1223 unlock:
1224 mutex_unlock(&ed->state_mutex);
1225 return rc;
1226 }
1227
1228 static int fun_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
1229 __be16 vlan_proto)
1230 {
1231 struct funeth_priv *fp = netdev_priv(dev);
1232 struct fun_adi_param vlan_param = {};
1233 struct fun_dev *fdev = fp->fdev;
1234 struct fun_ethdev *ed = to_fun_ethdev(fdev);
1235 struct fun_vport_info *vi;
1236 int rc = -EINVAL;
1237
1238 if (vlan > 4095 || qos > 7)
1239 return -EINVAL;
1240 if (vlan_proto && vlan_proto != htons(ETH_P_8021Q) &&
1241 vlan_proto != htons(ETH_P_8021AD))
1242 return -EINVAL;
1243
1244 mutex_lock(&ed->state_mutex);
1245 vi = fun_get_vport(ed, vf);
1246 if (!vi)
1247 goto unlock;
1248
1249 vlan_param.u.vlan = FUN_ADI_VLAN_INIT(be16_to_cpu(vlan_proto),
1250 ((u16)qos << VLAN_PRIO_SHIFT) | vlan);
1251 rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_VLAN, vf + 1, &vlan_param);
1252 if (!rc) {
1253 vi->vlan = vlan;
1254 vi->qos = qos;
1255 vi->vlan_proto = vlan_proto;
1256 }
1257 unlock:
1258 mutex_unlock(&ed->state_mutex);
1259 return rc;
1260 }
1261
1262 static int fun_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
1263 int max_tx_rate)
1264 {
1265 struct funeth_priv *fp = netdev_priv(dev);
1266 struct fun_adi_param rate_param = {};
1267 struct fun_dev *fdev = fp->fdev;
1268 struct fun_ethdev *ed = to_fun_ethdev(fdev);
1269 struct fun_vport_info *vi;
1270 int rc = -EINVAL;
1271
1272 if (min_tx_rate)
1273 return -EINVAL;
1274
1275 mutex_lock(&ed->state_mutex);
1276 vi = fun_get_vport(ed, vf);
1277 if (!vi)
1278 goto unlock;
1279
1280 rate_param.u.rate = FUN_ADI_RATE_INIT(max_tx_rate);
1281 rc = fun_adi_write(fdev, FUN_ADMIN_ADI_ATTR_RATE, vf + 1, &rate_param);
1282 if (!rc)
1283 vi->max_rate = max_tx_rate;
1284 unlock:
1285 mutex_unlock(&ed->state_mutex);
1286 return rc;
1287 }
1288
1289 static int fun_get_vf_config(struct net_device *dev, int vf,
1290 struct ifla_vf_info *ivi)
1291 {
1292 struct funeth_priv *fp = netdev_priv(dev);
1293 struct fun_ethdev *ed = to_fun_ethdev(fp->fdev);
1294 const struct fun_vport_info *vi;
1295
1296 mutex_lock(&ed->state_mutex);
1297 vi = fun_get_vport(ed, vf);
1298 if (!vi)
1299 goto unlock;
1300
1301 memset(ivi, 0, sizeof(*ivi));
1302 ivi->vf = vf;
1303 ether_addr_copy(ivi->mac, vi->mac);
1304 ivi->vlan = vi->vlan;
1305 ivi->qos = vi->qos;
1306 ivi->vlan_proto = vi->vlan_proto;
1307 ivi->max_tx_rate = vi->max_rate;
1308 ivi->spoofchk = vi->spoofchk;
1309 unlock:
1310 mutex_unlock(&ed->state_mutex);
1311 return vi ? 0 : -EINVAL;
1312 }
1313
1314 static void fun_uninit(struct net_device *dev)
1315 {
1316 struct funeth_priv *fp = netdev_priv(dev);
1317
1318 fun_prune_queue_irqs(dev);
1319 xa_destroy(&fp->irqs);
1320 }
1321
1322 static const struct net_device_ops fun_netdev_ops = {
1323 .ndo_open = funeth_open,
1324 .ndo_stop = funeth_close,
1325 .ndo_start_xmit = fun_start_xmit,
1326 .ndo_get_stats64 = fun_get_stats64,
1327 .ndo_change_mtu = fun_change_mtu,
1328 .ndo_set_mac_address = fun_set_macaddr,
1329 .ndo_validate_addr = eth_validate_addr,
1330 .ndo_uninit = fun_uninit,
1331 .ndo_bpf = fun_xdp,
1332 .ndo_xdp_xmit = fun_xdp_xmit_frames,
1333 .ndo_set_vf_mac = fun_set_vf_mac,
1334 .ndo_set_vf_vlan = fun_set_vf_vlan,
1335 .ndo_set_vf_rate = fun_set_vf_rate,
1336 .ndo_get_vf_config = fun_get_vf_config,
1337 .ndo_hwtstamp_get = fun_hwtstamp_get,
1338 .ndo_hwtstamp_set = fun_hwtstamp_set,
1339 };
1340
1341 #define GSO_ENCAP_FLAGS (NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | \
1342 NETIF_F_GSO_IPXIP6 | NETIF_F_GSO_UDP_TUNNEL | \
1343 NETIF_F_GSO_UDP_TUNNEL_CSUM)
1344 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
1345 NETIF_F_GSO_UDP_L4)
1346 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_HW_CSUM | TSO_FLAGS | \
1347 GSO_ENCAP_FLAGS | NETIF_F_HIGHDMA)
1348
1349 static void fun_dflt_rss_indir(struct funeth_priv *fp, unsigned int nrx)
1350 {
1351 unsigned int i;
1352
1353 for (i = 0; i < fp->indir_table_nentries; i++)
1354 fp->indir_table[i] = ethtool_rxfh_indir_default(i, nrx);
1355 }
1356
1357 /* Reset the RSS indirection table to equal distribution across the current
1358 * number of Rx queues. Called at init time and whenever the number of Rx
1359 * queues changes subsequently. Note that this may also resize the indirection
1360 * table.
1361 */
1362 static void fun_reset_rss_indir(struct net_device *dev, unsigned int nrx)
1363 {
1364 struct funeth_priv *fp = netdev_priv(dev);
1365
1366 if (!fp->rss_cfg)
1367 return;
1368
1369 /* Set the table size to the max possible that allows an equal number
1370 * of occurrences of each CQ.
1371 */
1372 fp->indir_table_nentries = rounddown(FUN_ETH_RSS_MAX_INDIR_ENT, nrx);
1373 fun_dflt_rss_indir(fp, nrx);
1374 }
1375
1376 /* Update the RSS LUT to contain only queues in [0, nrx). Normally this will
1377 * update the LUT to an equal distribution among nrx queues. If @only_if_needed
1378 * is set the LUT is left unchanged if it already does not reference any queues
1379 * >= nrx.
1380 */
1381 static int fun_rss_set_qnum(struct net_device *dev, unsigned int nrx,
1382 bool only_if_needed)
1383 {
1384 struct funeth_priv *fp = netdev_priv(dev);
1385 u32 old_lut[FUN_ETH_RSS_MAX_INDIR_ENT];
1386 unsigned int i, oldsz;
1387 int err;
1388
1389 if (!fp->rss_cfg)
1390 return 0;
1391
1392 if (only_if_needed) {
1393 for (i = 0; i < fp->indir_table_nentries; i++)
1394 if (fp->indir_table[i] >= nrx)
1395 break;
1396
1397 if (i >= fp->indir_table_nentries)
1398 return 0;
1399 }
1400
1401 memcpy(old_lut, fp->indir_table, sizeof(old_lut));
1402 oldsz = fp->indir_table_nentries;
1403 fun_reset_rss_indir(dev, nrx);
1404
1405 err = fun_config_rss(dev, fp->hash_algo, fp->rss_key,
1406 fp->indir_table, FUN_ADMIN_SUBOP_MODIFY);
1407 if (!err)
1408 return 0;
1409
1410 memcpy(fp->indir_table, old_lut, sizeof(old_lut));
1411 fp->indir_table_nentries = oldsz;
1412 return err;
1413 }
1414
1415 /* Allocate the DMA area for the RSS configuration commands to the device, and
1416 * initialize the hash, hash key, indirection table size and its entries to
1417 * their defaults. The indirection table defaults to equal distribution across
1418 * the Rx queues.
1419 */
1420 static int fun_init_rss(struct net_device *dev)
1421 {
1422 struct funeth_priv *fp = netdev_priv(dev);
1423 size_t size = sizeof(fp->rss_key) + sizeof(fp->indir_table);
1424
1425 fp->rss_hw_id = FUN_HCI_ID_INVALID;
1426 if (!(fp->port_caps & FUN_PORT_CAP_OFFLOADS))
1427 return 0;
1428
1429 fp->rss_cfg = dma_alloc_coherent(&fp->pdev->dev, size,
1430 &fp->rss_dma_addr, GFP_KERNEL);
1431 if (!fp->rss_cfg)
1432 return -ENOMEM;
1433
1434 fp->hash_algo = FUN_ETH_RSS_ALG_TOEPLITZ;
1435 netdev_rss_key_fill(fp->rss_key, sizeof(fp->rss_key));
1436 fun_reset_rss_indir(dev, dev->real_num_rx_queues);
1437 return 0;
1438 }
1439
1440 static void fun_free_rss(struct funeth_priv *fp)
1441 {
1442 if (fp->rss_cfg) {
1443 dma_free_coherent(&fp->pdev->dev,
1444 sizeof(fp->rss_key) + sizeof(fp->indir_table),
1445 fp->rss_cfg, fp->rss_dma_addr);
1446 fp->rss_cfg = NULL;
1447 }
1448 }
1449
1450 void fun_set_ring_count(struct net_device *netdev, unsigned int ntx,
1451 unsigned int nrx)
1452 {
1453 netif_set_real_num_tx_queues(netdev, ntx);
1454 if (nrx != netdev->real_num_rx_queues) {
1455 netif_set_real_num_rx_queues(netdev, nrx);
1456 fun_reset_rss_indir(netdev, nrx);
1457 }
1458 }
1459
1460 static int fun_init_stats_area(struct funeth_priv *fp)
1461 {
1462 unsigned int nstats;
1463
1464 if (!(fp->port_caps & FUN_PORT_CAP_STATS))
1465 return 0;
1466
1467 nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX +
1468 PORT_MAC_FEC_STATS_MAX;
1469
1470 fp->stats = dma_alloc_coherent(&fp->pdev->dev, nstats * sizeof(u64),
1471 &fp->stats_dma_addr, GFP_KERNEL);
1472 if (!fp->stats)
1473 return -ENOMEM;
1474 return 0;
1475 }
1476
1477 static void fun_free_stats_area(struct funeth_priv *fp)
1478 {
1479 unsigned int nstats;
1480
1481 if (fp->stats) {
1482 nstats = PORT_MAC_RX_STATS_MAX + PORT_MAC_TX_STATS_MAX;
1483 dma_free_coherent(&fp->pdev->dev, nstats * sizeof(u64),
1484 fp->stats, fp->stats_dma_addr);
1485 fp->stats = NULL;
1486 }
1487 }
1488
1489 static int fun_dl_port_register(struct net_device *netdev)
1490 {
1491 struct funeth_priv *fp = netdev_priv(netdev);
1492 struct devlink *dl = priv_to_devlink(fp->fdev);
1493 struct devlink_port_attrs attrs = {};
1494 unsigned int idx;
1495
1496 if (fp->port_caps & FUN_PORT_CAP_VPORT) {
1497 attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
1498 idx = fp->lport;
1499 } else {
1500 idx = netdev->dev_port;
1501 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
1502 attrs.lanes = fp->lane_attrs & 7;
1503 if (fp->lane_attrs & FUN_PORT_LANE_SPLIT) {
1504 attrs.split = 1;
1505 attrs.phys.port_number = fp->lport & ~3;
1506 attrs.phys.split_subport_number = fp->lport & 3;
1507 } else {
1508 attrs.phys.port_number = fp->lport;
1509 }
1510 }
1511
1512 devlink_port_attrs_set(&fp->dl_port, &attrs);
1513
1514 return devlink_port_register(dl, &fp->dl_port, idx);
1515 }
1516
1517 /* Determine the max Tx/Rx queues for a port. */
1518 static int fun_max_qs(struct fun_ethdev *ed, unsigned int *ntx,
1519 unsigned int *nrx)
1520 {
1521 int neth;
1522
1523 if (ed->num_ports > 1 || is_kdump_kernel()) {
1524 *ntx = 1;
1525 *nrx = 1;
1526 return 0;
1527 }
1528
1529 neth = fun_get_res_count(&ed->fdev, FUN_ADMIN_OP_ETH);
1530 if (neth < 0)
1531 return neth;
1532
1533 /* We determine the max number of queues based on the CPU
1534 * cores, device interrupts and queues, RSS size, and device Tx flows.
1535 *
1536 * - At least 1 Rx and 1 Tx queues.
1537 * - At most 1 Rx/Tx queue per core.
1538 * - Each Rx/Tx queue needs 1 SQ.
1539 */
1540 *ntx = min(ed->nsqs_per_port - 1, num_online_cpus());
1541 *nrx = *ntx;
1542 if (*ntx > neth)
1543 *ntx = neth;
1544 if (*nrx > FUN_ETH_RSS_MAX_INDIR_ENT)
1545 *nrx = FUN_ETH_RSS_MAX_INDIR_ENT;
1546 return 0;
1547 }
1548
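/* Set the initial numbers of Tx/Rx queues, capped at FUN_DFLT_QUEUES and
 * fitted within the @nsqs SQs available to the port.
 */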
1549 static void fun_queue_defaults(struct net_device *dev, unsigned int nsqs)
1550 {
1551 unsigned int ntx, nrx;
1552
1553 ntx = min(dev->num_tx_queues, FUN_DFLT_QUEUES);
1554 nrx = min(dev->num_rx_queues, FUN_DFLT_QUEUES);
1555 if (ntx <= nrx) {
1556 ntx = min(ntx, nsqs / 2);
1557 nrx = min(nrx, nsqs - ntx);
1558 } else {
1559 nrx = min(nrx, nsqs / 2);
1560 ntx = min(ntx, nsqs - nrx);
1561 }
1562
1563 netif_set_real_num_tx_queues(dev, ntx);
1564 netif_set_real_num_rx_queues(dev, nrx);
1565 }
1566
1567 /* Replace the existing Rx/Tx/XDP queues with equal number of queues with
1568 * different settings, e.g. depth. This is a disruptive replacement that
1569 * temporarily shuts down the data path and should be limited to changes that
1570 * can't be applied to live queues. The old queues are always discarded.
1571 */
1572 int fun_replace_queues(struct net_device *dev, struct fun_qset *newqs,
1573 struct netlink_ext_ack *extack)
1574 {
1575 struct fun_qset oldqs = { .state = FUN_QSTATE_DESTROYED };
1576 struct funeth_priv *fp = netdev_priv(dev);
1577 int err;
1578
1579 newqs->nrxqs = dev->real_num_rx_queues;
1580 newqs->ntxqs = dev->real_num_tx_queues;
1581 newqs->nxdpqs = fp->num_xdpqs;
1582 newqs->state = FUN_QSTATE_INIT_SW;
1583 err = fun_alloc_rings(dev, newqs);
1584 if (err) {
1585 NL_SET_ERR_MSG_MOD(extack,
1586 "Unable to allocate memory for new queues, keeping current settings");
1587 return err;
1588 }
1589
1590 fun_down(dev, &oldqs);
1591
1592 err = fun_up(dev, newqs);
1593 if (!err)
1594 return 0;
1595
1596 /* The new queues couldn't be installed. We do not retry the old queues
1597 * since, to the device, they are identical to the new queues and would
1598 * fail in the same way.
1599 */
1600 newqs->state = FUN_QSTATE_DESTROYED;
1601 fun_free_rings(dev, newqs);
1602 NL_SET_ERR_MSG_MOD(extack, "Unable to restore the data path with the new queues.");
1603 return err;
1604 }
1605
1606 /* Change the number of Rx/Tx queues of a device while it is up. This is done
1607 * by incrementally adding/removing queues to meet the new requirements while
1608 * handling ongoing traffic.
1609 */
1610 int fun_change_num_queues(struct net_device *dev, unsigned int ntx,
1611 unsigned int nrx)
1612 {
1613 unsigned int keep_tx = min(dev->real_num_tx_queues, ntx);
1614 unsigned int keep_rx = min(dev->real_num_rx_queues, nrx);
1615 struct funeth_priv *fp = netdev_priv(dev);
1616 struct fun_qset oldqs = {
1617 .rxqs = rtnl_dereference(fp->rxqs),
1618 .txqs = fp->txqs,
1619 .nrxqs = dev->real_num_rx_queues,
1620 .ntxqs = dev->real_num_tx_queues,
1621 .rxq_start = keep_rx,
1622 .txq_start = keep_tx,
1623 .state = FUN_QSTATE_DESTROYED
1624 };
1625 struct fun_qset newqs = {
1626 .nrxqs = nrx,
1627 .ntxqs = ntx,
1628 .rxq_start = keep_rx,
1629 .txq_start = keep_tx,
1630 .cq_depth = fp->cq_depth,
1631 .rq_depth = fp->rq_depth,
1632 .sq_depth = fp->sq_depth,
1633 .state = FUN_QSTATE_INIT_FULL
1634 };
1635 int i, err;
1636
1637 err = fun_alloc_rings(dev, &newqs);
1638 if (err)
1639 goto free_irqs;
1640
1641 err = fun_enable_irqs(dev); /* of any newly added queues */
1642 if (err)
1643 goto free_rings;
1644
1645 /* copy the queues we are keeping to the new set */
1646 memcpy(newqs.rxqs, oldqs.rxqs, keep_rx * sizeof(*oldqs.rxqs));
1647 memcpy(newqs.txqs, fp->txqs, keep_tx * sizeof(*fp->txqs));
1648
1649 if (nrx < dev->real_num_rx_queues) {
1650 err = fun_rss_set_qnum(dev, nrx, true);
1651 if (err)
1652 goto disable_tx_irqs;
1653
1654 for (i = nrx; i < dev->real_num_rx_queues; i++)
1655 fun_disable_one_irq(container_of(oldqs.rxqs[i]->napi,
1656 struct fun_irq, napi));
1657
1658 netif_set_real_num_rx_queues(dev, nrx);
1659 }
1660
1661 if (ntx < dev->real_num_tx_queues)
1662 netif_set_real_num_tx_queues(dev, ntx);
1663
1664 rcu_assign_pointer(fp->rxqs, newqs.rxqs);
1665 fp->txqs = newqs.txqs;
1666 synchronize_net();
1667
1668 if (ntx > dev->real_num_tx_queues)
1669 netif_set_real_num_tx_queues(dev, ntx);
1670
1671 if (nrx > dev->real_num_rx_queues) {
1672 netif_set_real_num_rx_queues(dev, nrx);
1673 fun_rss_set_qnum(dev, nrx, false);
1674 }
1675
1676 /* disable interrupts of any excess Tx queues */
1677 for (i = keep_tx; i < oldqs.ntxqs; i++)
1678 fun_disable_one_irq(oldqs.txqs[i]->irq);
1679
1680 fun_free_rings(dev, &oldqs);
1681 fun_prune_queue_irqs(dev);
1682 return 0;
1683
1684 disable_tx_irqs:
1685 for (i = oldqs.ntxqs; i < ntx; i++)
1686 fun_disable_one_irq(newqs.txqs[i]->irq);
1687 free_rings:
1688 newqs.state = FUN_QSTATE_DESTROYED;
1689 fun_free_rings(dev, &newqs);
1690 free_irqs:
1691 fun_prune_queue_irqs(dev);
1692 return err;
1693 }
1694
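/* Allocate and register the netdev for one port: create the device port, bind
 * it to the admin CQ for async events, read its attributes, set up RSS and the
 * stats DMA area, register the devlink port, and finally register the netdev.
 */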
1695 static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
1696 {
1697 struct fun_dev *fdev = &ed->fdev;
1698 struct net_device *netdev;
1699 struct funeth_priv *fp;
1700 unsigned int ntx, nrx;
1701 int rc;
1702
1703 rc = fun_max_qs(ed, &ntx, &nrx);
1704 if (rc)
1705 return rc;
1706
1707 netdev = alloc_etherdev_mqs(sizeof(*fp), ntx, nrx);
1708 if (!netdev) {
1709 rc = -ENOMEM;
1710 goto done;
1711 }
1712
1713 netdev->dev_port = portid;
1714 fun_queue_defaults(netdev, ed->nsqs_per_port);
1715
1716 fp = netdev_priv(netdev);
1717 fp->fdev = fdev;
1718 fp->pdev = to_pci_dev(fdev->dev);
1719 fp->netdev = netdev;
1720 xa_init(&fp->irqs);
1721 fp->rx_irq_ofst = ntx;
1722 seqcount_init(&fp->link_seq);
1723
1724 fp->lport = INVALID_LPORT;
1725 rc = fun_port_create(netdev);
1726 if (rc)
1727 goto free_netdev;
1728
1729 /* bind port to admin CQ for async events */
1730 rc = fun_bind(fdev, FUN_ADMIN_BIND_TYPE_PORT, portid,
1731 FUN_ADMIN_BIND_TYPE_EPCQ, 0);
1732 if (rc)
1733 goto destroy_port;
1734
1735 rc = fun_get_port_attributes(netdev);
1736 if (rc)
1737 goto destroy_port;
1738
1739 rc = fun_init_rss(netdev);
1740 if (rc)
1741 goto destroy_port;
1742
1743 rc = fun_init_stats_area(fp);
1744 if (rc)
1745 goto free_rss;
1746
1747 SET_NETDEV_DEV(netdev, fdev->dev);
1748 SET_NETDEV_DEVLINK_PORT(netdev, &fp->dl_port);
1749 netdev->netdev_ops = &fun_netdev_ops;
1750
1751 netdev->hw_features = NETIF_F_SG | NETIF_F_RXHASH | NETIF_F_RXCSUM;
1752 if (fp->port_caps & FUN_PORT_CAP_OFFLOADS)
1753 netdev->hw_features |= NETIF_F_HW_CSUM | TSO_FLAGS;
1754 if (fp->port_caps & FUN_PORT_CAP_ENCAP_OFFLOADS)
1755 netdev->hw_features |= GSO_ENCAP_FLAGS;
1756
1757 netdev->features |= netdev->hw_features | NETIF_F_HIGHDMA;
1758 netdev->vlan_features = netdev->features & VLAN_FEAT;
1759 netdev->mpls_features = netdev->vlan_features;
1760 netdev->hw_enc_features = netdev->hw_features;
1761 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
1762
1763 netdev->min_mtu = ETH_MIN_MTU;
1764 netdev->max_mtu = FUN_MAX_MTU;
1765
1766 fun_set_ethtool_ops(netdev);
1767
1768 /* configurable parameters */
1769 fp->sq_depth = min(SQ_DEPTH, fdev->q_depth);
1770 fp->cq_depth = min(CQ_DEPTH, fdev->q_depth);
1771 fp->rq_depth = min_t(unsigned int, RQ_DEPTH, fdev->q_depth);
1772 fp->rx_coal_usec = CQ_INTCOAL_USEC;
1773 fp->rx_coal_count = CQ_INTCOAL_NPKT;
1774 fp->tx_coal_usec = SQ_INTCOAL_USEC;
1775 fp->tx_coal_count = SQ_INTCOAL_NPKT;
1776 fp->cq_irq_db = FUN_IRQ_CQ_DB(fp->rx_coal_usec, fp->rx_coal_count);
1777
1778 rc = fun_dl_port_register(netdev);
1779 if (rc)
1780 goto free_stats;
1781
1782 fp->ktls_id = FUN_HCI_ID_INVALID;
1783 fun_ktls_init(netdev); /* optional, failure OK */
1784
1785 netif_carrier_off(netdev);
1786 ed->netdevs[portid] = netdev;
1787 rc = register_netdev(netdev);
1788 if (rc)
1789 goto unreg_devlink;
1790 return 0;
1791
1792 unreg_devlink:
1793 ed->netdevs[portid] = NULL;
1794 fun_ktls_cleanup(fp);
1795 devlink_port_unregister(&fp->dl_port);
1796 free_stats:
1797 fun_free_stats_area(fp);
1798 free_rss:
1799 fun_free_rss(fp);
1800 destroy_port:
1801 fun_port_destroy(netdev);
1802 free_netdev:
1803 free_netdev(netdev);
1804 done:
1805 dev_err(fdev->dev, "couldn't allocate port %u, error %d", portid, rc);
1806 return rc;
1807 }
1808
1809 static void fun_destroy_netdev(struct net_device *netdev)
1810 {
1811 struct funeth_priv *fp;
1812
1813 fp = netdev_priv(netdev);
1814 unregister_netdev(netdev);
1815 devlink_port_unregister(&fp->dl_port);
1816 fun_ktls_cleanup(fp);
1817 fun_free_stats_area(fp);
1818 fun_free_rss(fp);
1819 fun_port_destroy(netdev);
1820 free_netdev(netdev);
1821 }
1822
1823 static int fun_create_ports(struct fun_ethdev *ed, unsigned int nports)
1824 {
1825 struct fun_dev *fd = &ed->fdev;
1826 int i, rc;
1827
1828 /* The admin queue takes 1 IRQ and 2 SQs. */
1829 ed->nsqs_per_port = min(fd->num_irqs - 1,
1830 fd->kern_end_qid - 2) / nports;
1831 if (ed->nsqs_per_port < 2) {
1832 dev_err(fd->dev, "Too few SQs for %u ports", nports);
1833 return -EINVAL;
1834 }
1835
1836 ed->netdevs = kzalloc_objs(*ed->netdevs, nports);
1837 if (!ed->netdevs)
1838 return -ENOMEM;
1839
1840 ed->num_ports = nports;
1841 for (i = 0; i < nports; i++) {
1842 rc = fun_create_netdev(ed, i);
1843 if (rc)
1844 goto free_netdevs;
1845 }
1846
1847 return 0;
1848
1849 free_netdevs:
1850 while (i)
1851 fun_destroy_netdev(ed->netdevs[--i]);
1852 kfree(ed->netdevs);
1853 ed->netdevs = NULL;
1854 ed->num_ports = 0;
1855 return rc;
1856 }
1857
1858 static void fun_destroy_ports(struct fun_ethdev *ed)
1859 {
1860 unsigned int i;
1861
1862 for (i = 0; i < ed->num_ports; i++)
1863 fun_destroy_netdev(ed->netdevs[i]);
1864
1865 kfree(ed->netdevs);
1866 ed->netdevs = NULL;
1867 ed->num_ports = 0;
1868 }
1869
1870 static void fun_update_link_state(const struct fun_ethdev *ed,
1871 const struct fun_admin_port_notif *notif)
1872 {
1873 unsigned int port_idx = be16_to_cpu(notif->id);
1874 struct net_device *netdev;
1875 struct funeth_priv *fp;
1876
1877 if (port_idx >= ed->num_ports)
1878 return;
1879
1880 netdev = ed->netdevs[port_idx];
1881 fp = netdev_priv(netdev);
1882
1883 write_seqcount_begin(&fp->link_seq);
1884 fp->link_speed = be32_to_cpu(notif->speed) * 10; /* 10 Mbps->Mbps */
1885 fp->active_fc = notif->flow_ctrl;
1886 fp->active_fec = notif->fec;
1887 fp->xcvr_type = notif->xcvr_type;
1888 fp->link_down_reason = notif->link_down_reason;
1889 fp->lp_advertising = be64_to_cpu(notif->lp_advertising);
1890
1891 if ((notif->link_state | notif->missed_events) & FUN_PORT_FLAG_MAC_DOWN)
1892 netif_carrier_off(netdev);
1893 if (notif->link_state & FUN_PORT_FLAG_MAC_UP)
1894 netif_carrier_on(netdev);
1895
1896 write_seqcount_end(&fp->link_seq);
1897 fun_report_link(netdev);
1898 }
1899
1900 /* handler for async events delivered through the admin CQ */
1901 static void fun_event_cb(struct fun_dev *fdev, void *entry)
1902 {
1903 u8 op = ((struct fun_admin_rsp_common *)entry)->op;
1904
1905 if (op == FUN_ADMIN_OP_PORT) {
1906 const struct fun_admin_port_notif *rsp = entry;
1907
1908 if (rsp->subop == FUN_ADMIN_SUBOP_NOTIFY) {
1909 fun_update_link_state(to_fun_ethdev(fdev), rsp);
1910 } else if (rsp->subop == FUN_ADMIN_SUBOP_RES_COUNT) {
1911 const struct fun_admin_res_count_rsp *r = entry;
1912
1913 if (r->count.data)
1914 set_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags);
1915 else
1916 set_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags);
1917 fun_serv_sched(fdev);
1918 } else {
1919 dev_info(fdev->dev, "adminq event unexpected op %u subop %u",
1920 op, rsp->subop);
1921 }
1922 } else {
1923 dev_info(fdev->dev, "adminq event unexpected op %u", op);
1924 }
1925 }
1926
1927 /* handler for pending work managed by the service task */
1928 static void fun_service_cb(struct fun_dev *fdev)
1929 {
1930 struct fun_ethdev *ed = to_fun_ethdev(fdev);
1931 int rc;
1932
1933 if (test_and_clear_bit(FUN_SERV_DEL_PORTS, &fdev->service_flags))
1934 fun_destroy_ports(ed);
1935
1936 if (!test_and_clear_bit(FUN_SERV_RES_CHANGE, &fdev->service_flags))
1937 return;
1938
1939 rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
1940 if (rc < 0 || rc == ed->num_ports)
1941 return;
1942
1943 if (ed->num_ports)
1944 fun_destroy_ports(ed);
1945 if (rc)
1946 fun_create_ports(ed, rc);
1947 }
1948
1949 static int funeth_sriov_configure(struct pci_dev *pdev, int nvfs)
1950 {
1951 struct fun_dev *fdev = pci_get_drvdata(pdev);
1952 struct fun_ethdev *ed = to_fun_ethdev(fdev);
1953 int rc;
1954
1955 if (nvfs == 0) {
1956 if (pci_vfs_assigned(pdev)) {
1957 dev_warn(&pdev->dev,
1958 "Cannot disable SR-IOV while VFs are assigned\n");
1959 return -EPERM;
1960 }
1961
1962 mutex_lock(&ed->state_mutex);
1963 fun_free_vports(ed);
1964 mutex_unlock(&ed->state_mutex);
1965 pci_disable_sriov(pdev);
1966 return 0;
1967 }
1968
1969 rc = pci_enable_sriov(pdev, nvfs);
1970 if (rc)
1971 return rc;
1972
1973 mutex_lock(&ed->state_mutex);
1974 rc = fun_init_vports(ed, nvfs);
1975 mutex_unlock(&ed->state_mutex);
1976 if (rc) {
1977 pci_disable_sriov(pdev);
1978 return rc;
1979 }
1980
1981 return nvfs;
1982 }
1983
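/* PCI probe handler: allocate the devlink instance, enable the device and its
 * admin queue, and create a netdev for each port the device reports.
 */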
1984 static int funeth_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1985 {
1986 struct fun_dev_params aqreq = {
1987 .cqe_size_log2 = ilog2(ADMIN_CQE_SIZE),
1988 .sqe_size_log2 = ilog2(ADMIN_SQE_SIZE),
1989 .cq_depth = ADMIN_CQ_DEPTH,
1990 .sq_depth = ADMIN_SQ_DEPTH,
1991 .rq_depth = ADMIN_RQ_DEPTH,
1992 .min_msix = 2, /* 1 Rx + 1 Tx */
1993 .event_cb = fun_event_cb,
1994 .serv_cb = fun_service_cb,
1995 };
1996 struct devlink *devlink;
1997 struct fun_ethdev *ed;
1998 struct fun_dev *fdev;
1999 int rc;
2000
2001 devlink = fun_devlink_alloc(&pdev->dev);
2002 if (!devlink) {
2003 dev_err(&pdev->dev, "devlink alloc failed\n");
2004 return -ENOMEM;
2005 }
2006
2007 ed = devlink_priv(devlink);
2008 mutex_init(&ed->state_mutex);
2009
2010 fdev = &ed->fdev;
2011 rc = fun_dev_enable(fdev, pdev, &aqreq, KBUILD_MODNAME);
2012 if (rc)
2013 goto free_devlink;
2014
2015 rc = fun_get_res_count(fdev, FUN_ADMIN_OP_PORT);
2016 if (rc > 0)
2017 rc = fun_create_ports(ed, rc);
2018 if (rc < 0)
2019 goto disable_dev;
2020
2021 fun_serv_restart(fdev);
2022 fun_devlink_register(devlink);
2023 return 0;
2024
2025 disable_dev:
2026 fun_dev_disable(fdev);
2027 free_devlink:
2028 mutex_destroy(&ed->state_mutex);
2029 fun_devlink_free(devlink);
2030 return rc;
2031 }
2032
2033 static void funeth_remove(struct pci_dev *pdev)
2034 {
2035 struct fun_dev *fdev = pci_get_drvdata(pdev);
2036 struct devlink *devlink;
2037 struct fun_ethdev *ed;
2038
2039 ed = to_fun_ethdev(fdev);
2040 devlink = priv_to_devlink(ed);
2041 fun_devlink_unregister(devlink);
2042
2043 #ifdef CONFIG_PCI_IOV
2044 funeth_sriov_configure(pdev, 0);
2045 #endif
2046
2047 fun_serv_stop(fdev);
2048 fun_destroy_ports(ed);
2049 fun_dev_disable(fdev);
2050 mutex_destroy(&ed->state_mutex);
2051
2052 fun_devlink_free(devlink);
2053 }
2054
2055 static struct pci_driver funeth_driver = {
2056 .name = KBUILD_MODNAME,
2057 .id_table = funeth_id_table,
2058 .probe = funeth_probe,
2059 .remove = funeth_remove,
2060 .shutdown = funeth_remove,
2061 .sriov_configure = funeth_sriov_configure,
2062 };
2063
2064 module_pci_driver(funeth_driver);
2065
2066 MODULE_AUTHOR("Dimitris Michailidis <dmichail@fungible.com>");
2067 MODULE_DESCRIPTION("Fungible Ethernet Network Driver");
2068 MODULE_LICENSE("Dual BSD/GPL");
2069 MODULE_DEVICE_TABLE(pci, funeth_id_table);
2070