/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/iflib.h>
#ifdef RSS
#include <net/rss_config.h>
#endif

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "ifdi_if.h"
#include "enic.h"

#include "opt_inet.h"
#include "opt_inet6.h"

static int enic_isc_txd_encap(void *, if_pkt_info_t);
static void enic_isc_txd_flush(void *, uint16_t, qidx_t);
static int enic_isc_txd_credits_update(void *, uint16_t, bool);
static int enic_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
static int enic_isc_rxd_pkt_get(void *, if_rxd_info_t);
static void enic_isc_rxd_refill(void *, if_rxd_update_t);
static void enic_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
static int enic_legacy_intr(void *);
static void enic_initial_post_rx(struct enic *, struct vnic_rq *);
static int enic_wq_service(struct vnic_dev *, struct cq_desc *, u8, u16, u16,
    void *);
static int enic_rq_service(struct vnic_dev *, struct cq_desc *, u8, u16, u16,
    void *);

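/*
 * iflib TX/RX method table for the enic driver; iflib calls into these
 * handlers for descriptor encap, doorbell flushes, completion credit
 * updates, and receive processing.
 */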
struct if_txrx enic_txrx = {
	.ift_txd_encap = enic_isc_txd_encap,
	.ift_txd_flush = enic_isc_txd_flush,
	.ift_txd_credits_update = enic_isc_txd_credits_update,
	.ift_rxd_available = enic_isc_rxd_available,
	.ift_rxd_pkt_get = enic_isc_rxd_pkt_get,
	.ift_rxd_refill = enic_isc_rxd_refill,
	.ift_rxd_flush = enic_isc_rxd_flush,
	.ift_legacy_intr = enic_legacy_intr
};

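/*
 * Encode the DMA segments of one packet into transmit (WQ) descriptors
 * starting at the queue's cached head index, requesting a completion
 * entry on the last segment. The updated producer index is returned to
 * iflib in pi->ipi_new_pidx; the doorbell write is deferred to
 * enic_isc_txd_flush().
 */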
static int
enic_isc_txd_encap(void *vsc, if_pkt_info_t pi)
{
	struct enic_softc *softc;
	struct enic *enic;
	struct vnic_wq *wq;
	if_softc_ctx_t scctx;
	int nsegs;
	int i;

	struct wq_enet_desc *desc;
	uint64_t bus_addr;
	uint16_t mss = 7;
	uint16_t header_len = 0;
	uint8_t offload_mode = 0;
	uint8_t eop = 0, cq;
	uint8_t vlan_tag_insert = 0;
	unsigned short vlan_id = 0;

	unsigned int wq_desc_avail;
	int head_idx;
	unsigned int desc_count, data_len;

	softc = vsc;
	enic = &softc->enic;
	scctx = softc->scctx;

	wq = &enic->wq[pi->ipi_qsidx];
	nsegs = pi->ipi_nsegs;

	ENIC_LOCK(softc);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;

	/* Request hardware checksum insertion when TX offload is enabled. */
	if ((scctx->isc_capenable & IFCAP_TXCSUM) != 0)
		offload_mode |= WQ_ENET_OFFLOAD_MODE_CSUM;

	for (i = 0; i < nsegs; i++) {
		eop = 0;
		cq = 0;
		wq->cq_pend++;
		if (i + 1 == nsegs) {
			eop = 1;
			cq = 1;
			wq->cq_pend = 0;
		}
		desc = wq->ring.descs;
		bus_addr = pi->ipi_segs[i].ds_addr;
		data_len = pi->ipi_segs[i].ds_len;

		wq_enet_desc_enc(&desc[head_idx], bus_addr, data_len, mss,
		    header_len, offload_mode, eop, cq, 0,
		    vlan_tag_insert, vlan_id, 0);

		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;
	}

	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	pi->ipi_new_pidx = head_idx;
	ENIC_UNLOCK(softc);

	return (0);
}

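/*
 * TX doorbell: post the queue's current head index to the NIC. Note
 * that the cached head index is written rather than the pidx argument
 * supplied by iflib.
 */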
static void
enic_isc_txd_flush(void *vsc, uint16_t txqid, qidx_t pidx)
{
	struct enic_softc *softc;
	struct enic *enic;
	struct vnic_wq *wq;
	int head_idx;

	softc = vsc;
	enic = &softc->enic;

	ENIC_LOCK(softc);
	wq = &enic->wq[txqid];
	head_idx = wq->head_idx;

	ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, head_idx);
	ENIC_UNLOCK(softc);
}

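/*
 * TX completion credits for iflib. With clear == false, only report
 * whether any completions are pending; with clear == true, service the
 * completion queue and return the number of descriptors reclaimed.
 */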
static int
enic_isc_txd_credits_update(void *vsc, uint16_t txqid, bool clear)
{
	struct enic_softc *softc;
	struct enic *enic;
	struct vnic_wq *wq;
	struct vnic_cq *cq;
	int processed;
	unsigned int cq_wq;
	unsigned int wq_work_to_do = 10;
	unsigned int wq_work_avail;

	softc = vsc;
	enic = &softc->enic;
	wq = &softc->enic.wq[txqid];

	cq_wq = enic_cq_wq(enic, txqid);
	cq = &enic->cq[cq_wq];

	ENIC_LOCK(softc);
	wq_work_avail = vnic_cq_work(cq, wq_work_to_do);
	ENIC_UNLOCK(softc);

	if (wq_work_avail == 0)
		return (0);

	if (!clear)
		return (1);

	ENIC_LOCK(softc);
	vnic_cq_service(cq, wq_work_to_do, enic_wq_service, NULL);

	processed = wq->processed;
	wq->processed = 0;

	ENIC_UNLOCK(softc);

	return (processed);
}

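/*
 * Report how many RX completions are pending on the completion queue
 * backing this receive queue, bounded by iflib's budget.
 */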
static int
enic_isc_rxd_available(void *vsc, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
	struct enic_softc *softc;
	struct enic *enic;
	struct vnic_cq *cq;
	unsigned int rq_work_to_do = budget;
	unsigned int rq_work_avail = 0;
	unsigned int cq_rq;

	softc = vsc;
	enic = &softc->enic;

	cq_rq = enic_cq_rq(&softc->enic, rxqid);
	cq = &enic->cq[cq_rq];

	rq_work_avail = vnic_cq_work(cq, rq_work_to_do);
	return (rq_work_avail);
}

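/*
 * Harvest a single received packet: service one completion queue entry
 * (enic_rq_service() fills in ri) and return the interrupt credit to
 * the NIC. Returns 0 on success or -1 if no completion was ready.
 */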
static int
enic_isc_rxd_pkt_get(void *vsc, if_rxd_info_t ri)
{
	struct enic_softc *softc;
	struct enic *enic;
	struct vnic_cq *cq;
	unsigned int rq_work_to_do = 1;
	unsigned int rq_work_done = 0;
	unsigned int cq_rq;

	softc = vsc;
	enic = &softc->enic;

	cq_rq = enic_cq_rq(&softc->enic, ri->iri_qsidx);
	cq = &enic->cq[cq_rq];
	ENIC_LOCK(softc);
	rq_work_done = vnic_cq_service(cq, rq_work_to_do, enic_rq_service, ri);

	if (rq_work_done != 0) {
		vnic_intr_return_credits(&enic->intr[cq_rq], rq_work_done, 0,
		    1);
		ENIC_UNLOCK(softc);
		return (0);
	} else {
		ENIC_UNLOCK(softc);
		return (-1);
	}
}

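/*
 * Write the physical buffer addresses supplied by iflib into the RQ
 * descriptor ring, wrapping at the end of the ring. On the first
 * refill after init, also reset the fetch index and post the initial
 * buffers via enic_initial_post_rx().
 */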
static void
enic_isc_rxd_refill(void *vsc, if_rxd_update_t iru)
{
	struct enic_softc *softc;
	struct vnic_rq *rq;
	struct rq_enet_desc *rqd;

	uint64_t *paddrs;
	int count;
	uint32_t pidx;
	int len;
	int idx;
	int i;

	count = iru->iru_count;
	len = iru->iru_buf_size;
	paddrs = iru->iru_paddrs;
	pidx = iru->iru_pidx;

	softc = vsc;
	rq = &softc->enic.rq[iru->iru_qsidx];
	rqd = rq->ring.descs;

	idx = pidx;
	for (i = 0; i < count; i++, idx++) {
		if (idx == rq->ring.desc_count)
			idx = 0;
		rq_enet_desc_enc(&rqd[idx], paddrs[i],
		    RQ_ENET_TYPE_ONLY_SOP, len);
	}

	rq->in_use = 1;

	if (rq->need_initial_post) {
		ENIC_BUS_WRITE_4(rq->ctrl, RX_FETCH_INDEX, 0);
	}

	enic_initial_post_rx(&softc->enic, rq);
}

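/*
 * RX doorbell: make refilled descriptors visible to the NIC by
 * advancing the posted index.
 */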
static void
enic_isc_rxd_flush(void *vsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
{
	struct enic_softc *softc;
	struct vnic_rq *rq;

	softc = vsc;
	rq = &softc->enic.rq[rxqid];

	/*
	 * pidx is the index of the last descriptor with a buffer the device
	 * can use, and the device needs to be told which index is one past
	 * that.
	 */

	ENIC_LOCK(softc);
	ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, pidx);
	ENIC_UNLOCK(softc);
}

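/*
 * Legacy (INTx) interrupt filter registered through ift_legacy_intr;
 * effectively a stub, as no queue processing is done here.
 */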
static int
enic_legacy_intr(void *xsc)
{
	return (1);
}

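/*
 * Account for WQ descriptors completed up to completed_index: return
 * them to the available pool, add them to the queue's processed count,
 * and remember the new completion point.
 */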
static inline void
vnic_wq_service(struct vnic_wq *wq, struct cq_desc *cq_desc,
    u16 completed_index, void (*buf_service)(struct vnic_wq *wq,
    struct cq_desc *cq_desc, /* struct vnic_wq_buf **buf, */ void *opaque),
    void *opaque)
{
	int processed;

	processed = completed_index - wq->ring.last_count;
	if (processed < 0)
		processed += wq->ring.desc_count;
	if (processed == 0)
		processed++;

	wq->ring.desc_avail += processed;
	wq->processed += processed;
	wq->ring.last_count = completed_index;
}

/*
 * Post the Rx buffers for the first time. enic_alloc_rx_queue_mbufs() has
 * allocated the buffers and filled the RQ descriptor ring. Just need to push
 * the post index to the NIC.
 */
static void
enic_initial_post_rx(struct enic *enic, struct vnic_rq *rq)
{
	struct enic_softc *softc = enic->softc;

	if (!rq->in_use || !rq->need_initial_post)
		return;

	ENIC_LOCK(softc);
	/* make sure all prior writes are complete before doing the PIO write */
	/* Post all but the last buffer to VIC. */
	rq->posted_index = rq->ring.desc_count - 1;

	rq->rx_nb_hold = 0;

	ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, rq->posted_index);

	rq->need_initial_post = false;
	ENIC_UNLOCK(softc);
}

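/*
 * Per-entry callback from vnic_cq_service() for TX completions;
 * forwards the completed index to vnic_wq_service() for accounting.
 */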
static int
enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
    u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
	    completed_index, NULL, opaque);
	return (0);
}

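/*
 * Decode one RX completion descriptor and translate it into iflib's
 * if_rxd_info: fragment index and length, consumer index, total packet
 * length, and checksum status flags.
 */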
static void
vnic_rq_service(struct vnic_rq *rq, struct cq_desc *cq_desc,
    u16 in_completed_index, int desc_return,
    void (*buf_service)(struct vnic_rq *rq, struct cq_desc *cq_desc,
    /* struct vnic_rq_buf **buf, */ int skipped, void *opaque), void *opaque)
{
	if_softc_ctx_t scctx;
	if_rxd_info_t ri = (if_rxd_info_t) opaque;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;
	int cqidx;
	if_rxd_frag_t frag;

	scctx = rq->vdev->softc->scctx;

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
	    &type, &color, &q_number, &completed_index,
	    &ingress_port, &fcoe, &eop, &sop, &rss_type,
	    &csum_not_calc, &rss_hash, &bytes_written,
	    &packet_error, &vlan_stripped, &vlan_tci, &checksum,
	    &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
	    &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
	    &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
	    &fcs_ok);

	cqidx = ri->iri_cidx;

	frag = &ri->iri_frags[0];
	frag->irf_idx = cqidx;
	frag->irf_len = bytes_written;

	if (++cqidx == rq->ring.desc_count) {
		cqidx = 0;
	}

	ri->iri_cidx = cqidx;
	ri->iri_nfrags = 1;
	ri->iri_len = bytes_written;

	if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0 &&
	    !csum_not_calc && (tcp_udp_csum_ok || ipv4_csum_ok)) {
		ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
	}
}

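/*
 * Per-entry callback from vnic_cq_service() for RX completions;
 * dispatches to vnic_rq_service() with the if_rxd_info passed through
 * as the opaque argument.
 */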
static int
enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
    u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);
	if_rxd_info_t ri = (if_rxd_info_t) opaque;

	vnic_rq_service(&enic->rq[ri->iri_qsidx], cq_desc, completed_index,
	    VNIC_RQ_RETURN_DESC, NULL, /* enic_rq_indicate_buf, */ opaque);

	return (0);
}

void
enic_prep_wq_for_simple_tx(struct enic *enic, uint16_t queue_idx)
{
	struct wq_enet_desc *desc;
	struct vnic_wq *wq;
	unsigned int i;

	/*
	 * Fill WQ descriptor fields that never change. Every descriptor is
	 * one packet, so set EOP. Also set CQ_ENTRY every ENIC_WQ_CQ_THRESH
	 * descriptors (i.e. request one completion update every 32 packets).
	 */
	wq = &enic->wq[queue_idx];
	desc = (struct wq_enet_desc *)wq->ring.descs;
	for (i = 0; i < wq->ring.desc_count; i++, desc++) {
		desc->header_length_flags = 1 << WQ_ENET_FLAGS_EOP_SHIFT;
		if (i % ENIC_WQ_CQ_THRESH == ENIC_WQ_CQ_THRESH - 1)
			desc->header_length_flags |=
			    (1 << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT);
	}
}

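/*
 * Queue start/stop helpers used by the driver's init and stop paths.
 * enic_start_rq() also pushes the initial posted index to the NIC.
 */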
void
enic_start_wq(struct enic *enic, uint16_t queue_idx)
{
	vnic_wq_enable(&enic->wq[queue_idx]);
}

int
enic_stop_wq(struct enic *enic, uint16_t queue_idx)
{
	int ret;

	ret = vnic_wq_disable(&enic->wq[queue_idx]);

	return (ret);
}

void
enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
	struct vnic_rq *rq;

	rq = &enic->rq[queue_idx];
	vnic_rq_enable(rq);
	enic_initial_post_rx(enic, rq);
}

int
enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
	int ret;

	ret = vnic_rq_disable(&enic->rq[queue_idx]);

	return (ret);
}

void
enic_dev_disable(struct enic *enic)
{
	vnic_dev_disable(enic->vdev);
}