/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include "enic.h"
#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_nic.h"
#include "vnic_stats.h"

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE 128

#define VNIC_MAX_FLOW_COUNTERS 2048

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

void vnic_register_cbacks(struct vnic_dev *vdev,
    void *(*alloc_consistent)(void *priv, size_t size,
	bus_addr_t *dma_handle, struct iflib_dma_info *res, u8 *name),
    void (*free_consistent)(void *priv, size_t size, void *vaddr,
	bus_addr_t dma_handle, struct iflib_dma_info *res))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}

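/*
 * Walk the BAR0 resource directory: a vnic_resource_header (or a
 * mgmt_barmap_hdr on management vNICs) followed by an array of
 * vnic_resource entries terminated by RES_TYPE_EOL, recording where each
 * supported resource type lives.
 */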
static int vnic_dev_discover_res(struct vnic_dev *vdev,
    struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct enic_softc *softc = vdev->softc;
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	int r_offset;
	u8 type;

	if (num_bars == 0)
		return (EINVAL);

	rh = malloc(sizeof(*rh), M_DEVBUF, M_NOWAIT | M_ZERO);
	mrh = malloc(sizeof(*mrh), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (rh == NULL || mrh == NULL) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		free(rh, M_DEVBUF);
		free(mrh, M_DEVBUF);
		return (EINVAL);
	}

	/* Check for mgmt vnic in addition to normal vnic */
	ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)rh, sizeof(*rh) / 4);
	ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)mrh, sizeof(*mrh) / 4);
	if ((rh->magic != VNIC_RES_MAGIC) ||
	    (rh->version != VNIC_RES_VERSION)) {
		if ((mrh->magic != MGMTVNIC_MAGIC) ||
		    (mrh->version != MGMTVNIC_VERSION)) {
			pr_err("vNIC BAR0 res magic/version error "
			    "exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
			    VNIC_RES_MAGIC, VNIC_RES_VERSION,
			    MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
			    rh->magic, rh->version);
			free(rh, M_DEVBUF);
			free(mrh, M_DEVBUF);
			return (EINVAL);
		}
	}

	if (mrh->magic == MGMTVNIC_MAGIC)
		r_offset = sizeof(*mrh);
	else
		r_offset = sizeof(*rh);

	r = malloc(sizeof(*r), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (r == NULL) {
		free(rh, M_DEVBUF);
		free(mrh, M_DEVBUF);
		return (ENOMEM);
	}
	ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4);
	while ((type = r->type) != RES_TYPE_EOL) {
		u8 bar_num = r->bar;
		u32 bar_offset = r->bar_offset;
		u32 count = r->count;

		r_offset += sizeof(*r);

		if (bar_num >= num_bars) {
			/* skip entries for BARs we did not map */
			ENIC_BUS_READ_REGION_4(softc, mem, r_offset,
			    (void *)r, sizeof(*r) / 4);
			continue;
		}

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			break;
		default:
			ENIC_BUS_READ_REGION_4(softc, mem, r_offset,
			    (void *)r, sizeof(*r) / 4);
			continue;
		}

		vdev->res[type].count = count;
		bcopy(&softc->mem, &vdev->res[type].bar, sizeof(softc->mem));
		vdev->res[type].bar.offset = bar_offset;
		ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r,
		    sizeof(*r) / 4);
	}

	free(rh, M_DEVBUF);
	free(mrh, M_DEVBUF);
	free(r, M_DEVBUF);
	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
    enum vnic_res_type type)
{
	return vdev->res[type].count;
}

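/*
 * Return a copy of the resource descriptor for (type, index) with the
 * per-queue offset applied.  The copy is malloc'ed with M_DEVBUF, so the
 * caller owns it and is responsible for freeing it.
 */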
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
    unsigned int index)
{
	struct vnic_res *res;

	if (!vdev->res[type].bar.tag)
		return NULL;

	res = malloc(sizeof(*res), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (res == NULL)
		return NULL;
	bcopy(&vdev->res[type], res, sizeof(*res));

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		res->bar.offset += index * VNIC_RES_STRIDE;
		break;
	default:
		break;
	}

	return res;
}

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
    unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors. A
	 * count of 0 means the maximum 4096 descriptors. Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);
	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

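/*
 * Issue a command over the legacy devcmd (v1) register interface: write
 * the argument registers, write the command register, then poll
 * DEVCMD_STATUS in 100 us steps (up to 'wait' iterations) until the
 * firmware clears STAT_BUSY.  A status of all-ones means the PCIe device
 * is gone.
 */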
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
    int wait)
{
	struct vnic_res __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	int err;

	status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return (ENODEV);
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return (EBUSY);
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		ENIC_BUS_WRITE_REGION_4(devcmd, DEVCMD_ARGS(0),
		    (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2);
	}

	ENIC_BUS_WRITE_4(devcmd, DEVCMD_CMD, cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) {
		return 0;
	}

	for (delay = 0; delay < wait; delay++) {
		udelay(100);

		status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return (ENODEV);
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)ENIC_BUS_READ_8(devcmd,
				    DEVCMD_ARGS(0));
				if (cmd != CMD_CAPABILITY)
					pr_err("Devcmd %d failed "
					    "with error code %d\n",
					    _CMD_N(cmd), err);
				return (err);
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				ENIC_BUS_READ_REGION_4(devcmd, bar,
				    DEVCMD_ARGS(0), (void *)&vdev->args[0],
				    VNIC_DEVCMD_NARGS * 2);
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return (ETIMEDOUT);
}

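/*
 * Issue a command over the devcmd2 work-queue interface: post a command
 * descriptor at the current posted index, bump TX_POSTED_INDEX, and then
 * (unless the command was posted fire-and-forget with DEVCMD2_FNORESULT)
 * spin on the next result-ring entry until its color bit matches ours.
 */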
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
    int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	u32 fetch_index, new_posted;
	int delay, err;
	u32 posted = dc2c->posted;

	fetch_index = ENIC_BUS_READ_4(dc2c->wq_ctrl, TX_FETCH_INDEX);
	if (fetch_index == 0xFFFFFFFF)
		return (ENODEV);

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	if (new_posted == fetch_index) {
		device_printf(dev_from_vnic_dev(vdev),
		    "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
		    _CMD_N(cmd), fetch_index, posted);
		return (EBUSY);
	}

	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];

	ENIC_BUS_WRITE_4(dc2c->wq_ctrl, TX_POSTED_INDEX, new_posted);
	dc2c->posted = new_posted;

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return (0);

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		if (result->color == color) {
			if (result->error) {
				err = result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					device_printf(dev_from_vnic_dev(vdev),
					    "Error %d devcmd %d\n", err,
					    _CMD_N(cmd));
				return (err);
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ)
				for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
					vdev->args[i] = result->results[i];

			return 0;
		}
		udelay(100);
	}

	device_printf(dev_from_vnic_dev(vdev),
	    "devcmd %d timed out\n", _CMD_N(cmd));

	return (ETIMEDOUT);
}

static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
    enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
    u64 *args, int nargs, int wait)
{
	u32 status;
	int err;

	/*
	 * A proxy command consumes 2 arguments: one for the proxy index,
	 * the other for the command being proxied.
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return (EINVAL);
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
	if (err)
		return (err);

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return (err);
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
    enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return (EINVAL);
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = vdev->devcmd_rtn(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return (err);
}

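/*
 * Common two-argument devcmd entry point, dispatching to the proxy or
 * direct path.  A minimal usage sketch (hypothetical caller):
 *
 *	u64 a0 = 0, a1 = 0;
 *	int err = vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, 1000);
 *
 * On success, the argument registers (possibly updated by the firmware)
 * are copied back into *a0 and *a1.
 */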
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
    u64 *a0, u64 *a1, int wait)
{
	u64 args[2];
	int err;

	args[0] = *a0;
	args[1] = *a1;
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
		    args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
		    args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
		break;
	}

	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return (err);
}

int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
    u64 *args, int nargs, int wait)
{
	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
		    args, nargs, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
		    args, nargs, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
	}
}

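/*
 * Probe CMD_CAPABILITY for advanced filter support.  On return, args[1]
 * holds the filter capability mask; when args[2] reports
 * FILTER_CAP_MODE_V1, args[3] carries the supported filter actions.
 */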
static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
    int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (u32)FILTER_DPDK_1);
}

/* Determine the "best" filtering mode the VIC is capable of. Returns one of
 * 3 values or 0 on error:
 * FILTER_DPDK_1 - advanced filters available
 * FILTER_USNIC_IP_FLAG - advanced filters but with the restriction that
 *	the IP layer must be explicitly specified, i.e. cannot have a UDP
 *	filter that matches both IPv4 and IPv6.
 * FILTER_IPV4_5TUPLE - fallback if neither of the 2 above is available;
 *	all other filter types are not available.
 * Returns true in filter_tags if supported.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
    u8 *filter_actions)
{
	u64 args[4];
	int err;
	u32 max_level = 0;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter command not supported, or adv filters available
		 * but not enabled. Try the normal filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		if (err)
			return (err);
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (u32)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}

void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
    bool *weak)
{
	u64 a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	*cfg_chk = false;
	*weak = false;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*cfg_chk = true;
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
	}
}

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
    void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return (err);
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;
	int rc;

	if (!vdev->stats)
		return (ENOMEM);

	*stats = vdev->stats;
	a0 = vdev->stats_res.idi_paddr;
	a1 = sizeof(struct vnic_stats);

	bus_dmamap_sync(vdev->stats_res.idi_tag,
	    vdev->stats_res.idi_map,
	    BUS_DMASYNC_POSTREAD);
	rc = vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
	bus_dmamap_sync(vdev->stats_res.idi_tag,
	    vdev->stats_res.idi_map,
	    BUS_DMASYNC_PREREAD);
	return (rc);
}

/*
 * Configure counter DMA
 */
int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
    u32 num_counters)
{
	u64 args[3];
	int wait = 1000;
	int err;

	if (num_counters > VNIC_MAX_FLOW_COUNTERS)
		return (ENOMEM);
	if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD ||
	    num_counters == 0))
		return (EINVAL);

	args[0] = num_counters;
	args[1] = vdev->flow_counters_res.idi_paddr;
	args[2] = period;
	bus_dmamap_sync(vdev->flow_counters_res.idi_tag,
	    vdev->flow_counters_res.idi_map,
	    BUS_DMASYNC_POSTREAD);
	err = vnic_dev_cmd_args(vdev, CMD_COUNTER_DMA_CONFIG, args, 3, wait);
	bus_dmamap_sync(vdev->flow_counters_res.idi_tag,
	    vdev->flow_counters_res.idi_map,
	    BUS_DMASYNC_PREREAD);

	/* record if DMAs need to be stopped on close */
	if (!err)
		vdev->flow_counters_dma_active = (num_counters != 0 &&
		    period != 0);

	return (err);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return (err);

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return (err);

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
    int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	    (multicast ? CMD_PFILTER_MULTICAST : 0) |
	    (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	    (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	    (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return (err);
}

int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
		    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
		    err);

	return (err);
}

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
		    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
		    err);

	return (err);
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
    u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
		    &a0, &a1, wait);
	else
		return 0;
}

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}

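/*
 * Register a DMA buffer for asynchronous status notifications (link
 * state, port speed, MTU).  a1 carries the interrupt index in its upper
 * 32 bits and the size of the notify block in its lower half.
 */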
int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
    void *notify_addr, bus_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	bus_dmamap_sync(vdev->notify_res.idi_tag,
	    vdev->notify_res.idi_map,
	    BUS_DMASYNC_PREWRITE);
	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	bus_dmamap_sync(vdev->notify_res.idi_tag,
	    vdev->notify_res.idi_map,
	    BUS_DMASYNC_POSTWRITE);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr = NULL;
	bus_addr_t notify_pa = 0;
	char name[NAME_MAX];
	static u32 instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
		    vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
		    "vnic_notify-%u", instance++);
		iflib_dma_alloc(vdev->softc->ctx,
		    sizeof(struct vnic_devcmd_notify),
		    &vdev->notify_res, BUS_DMA_NOWAIT);
		notify_pa = vdev->notify_res.idi_paddr;
		notify_addr = vdev->notify_res.idi_vaddr;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0; /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return (err);
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		iflib_dma_free(&vdev->notify_res);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

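/*
 * The firmware stores a checksum of the notify block in word 0.  Re-copy
 * the block until the sum of the remaining words matches it, so a torn
 * (in-progress) DMA update is never consumed.
 */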
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		bus_dmamap_sync(vdev->notify_res.idi_tag,
		    vdev->notify_res.idi_map,
		    BUS_DMASYNC_PREREAD);
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		bus_dmamap_sync(vdev->notify_res.idi_tag,
		    vdev->notify_res.idi_map,
		    BUS_DMASYNC_POSTREAD);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return (1);
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
	    vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
	    vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
	    vdev->intr_coal_timer_info.mul;
}

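/*
 * Worked example with the defaults above (mul = 2, div = 3, i.e. one
 * hardware cycle per 1.5 usec): 30 usec converts to 30 * 2 / 3 = 20
 * cycles, and 20 cycles convert back to 20 * 3 / 2 = 30 usec.
 */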
u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
    enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;
	struct enic_softc *softc;

	softc = vdev->softc;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	iflib_dma_alloc(softc->ctx, sizeof(struct vnic_stats),
	    &vdev->stats_res, 0);
	vdev->stats = (struct vnic_stats *)vdev->stats_res.idi_vaddr;
	return (vdev->stats == NULL ? ENOMEM : 0);
}

/*
 * Initialize for up to VNIC_MAX_FLOW_COUNTERS
 */
int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;
	struct enic_softc *softc;

	softc = vdev->softc;

	snprintf((char *)name, sizeof(name), "vnic_flow_ctrs-%u", instance++);
	iflib_dma_alloc(softc->ctx, sizeof(struct vnic_counter_counts) *
	    VNIC_MAX_FLOW_COUNTERS, &vdev->flow_counters_res, 0);
	vdev->flow_counters =
	    (struct vnic_counter_counts *)vdev->flow_counters_res.idi_vaddr;
	vdev->flow_counters_dma_active = 0;
	return (vdev->flow_counters == NULL ? ENOMEM : 0);
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
    struct enic_bar_info *mem, unsigned int num_bars)
{
	if (vnic_dev_discover_res(vdev, NULL, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	return NULL;
}

static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return (ENODEV);
	vdev->devcmd_rtn = _vnic_dev_cmd;

	return 0;
}

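/*
 * Bring up the devcmd2 interface: allocate the command work queue, seed
 * the posted index from the hardware fetch index, allocate the result
 * ring, and hand its address to the firmware via CMD_INITIALIZE_DEVCMD2.
 */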
static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	int err;
	unsigned int fetch_index;

	err = 0;

	if (vdev->devcmd2)
		return (0);

	vdev->devcmd2 = malloc(sizeof(*vdev->devcmd2), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (!vdev->devcmd2) {
		return (ENOMEM);
	}

	vdev->devcmd2->color = 1;
	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;

	err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq,
	    DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err) {
		goto err_free_devcmd2;
	}
	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
	vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;

	fetch_index = ENIC_BUS_READ_4(vdev->devcmd2->wq.ctrl, TX_FETCH_INDEX);
	if (fetch_index == 0xFFFFFFFF) {
		/* device is gone; clean up instead of leaking devcmd2 */
		err = ENODEV;
		goto err_free_devcmd2;
	}

	enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
	    0);
	vdev->devcmd2->posted = fetch_index;
	vnic_wq_enable(&vdev->devcmd2->wq);

	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
	    DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_free_devcmd2;

	vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
	vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
	    VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
	if (err)
		goto err_free_devcmd2;

	vdev->devcmd_rtn = _vnic_dev_cmd2;

	return (err);

err_free_devcmd2:
	/* err was set by whichever step failed above; preserve it */
	if (vdev->devcmd2->wq_ctrl)
		vnic_wq_free(&vdev->devcmd2->wq);
	if (vdev->devcmd2->result)
		vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	free(vdev->devcmd2, M_DEVBUF);
	vdev->devcmd2 = NULL;

	return (err);
}

/*
 * vnic_dev_classifier: Add/Delete classifier entries
 * @vdev: vdev of the device
 * @cmd: CLSF_ADD for Add filter
 *	 CLSF_DEL for Delete filter
 * @entry: In case of ADD filter, the caller passes the RQ number in this
 *	variable.
 *	This function stores the filter_id returned by the
 *	firmware in the same variable before return;
 *
 *	In case of DEL filter, the caller passes the RQ number. Return
 *	value is irrelevant.
 * @data: filter data
 * @action: action data
 */

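/*
 * Overlay (e.g. VXLAN) offload knobs: the _ctrl variant enables or
 * disables the offload for an overlay type, and the _cfg variant
 * programs the overlay parameter, here the VXLAN UDP port number.
 */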
int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
    u16 vxlan_udp_port_number)
{
	u64 a1 = vxlan_udp_port_number;
	u64 a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
	u64 a0 = VIC_FEATURE_VXLAN;
	u64 a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
	return ret == 0 &&
	    (a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
	    (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}

bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx)
{
	u64 a0 = 0;
	u64 a1 = 0;
	int wait = 1000;

	if (vnic_dev_cmd(vdev, CMD_COUNTER_ALLOC, &a0, &a1, wait))
		return false;
	*idx = (uint32_t)a0;
	return true;
}

bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx)
{
	u64 a0 = idx;
	u64 a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_COUNTER_FREE, &a0, &a1,
	    wait) == 0;
}

bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
    bool reset, uint64_t *packets, uint64_t *bytes)
{
	u64 a0 = idx;
	u64 a1 = reset ? 1 : 0;
	int wait = 1000;

	if (reset) {
		/* query/reset returns updated counters */
		if (vnic_dev_cmd(vdev, CMD_COUNTER_QUERY, &a0, &a1, wait))
			return false;
		*packets = a0;
		*bytes = a1;
	} else {
		/* Get values DMA'd from the adapter */
		*packets = vdev->flow_counters[idx].vcc_packets;
		*bytes = vdev->flow_counters[idx].vcc_bytes;
	}
	return true;
}

device_t dev_from_vnic_dev(struct vnic_dev *vdev)
{
	return (vdev->softc->dev);
}

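/*
 * Select the devcmd backend: prefer devcmd2 when the DEVCMD2 resource is
 * present and initializes cleanly, otherwise fall back to the legacy
 * devcmd1 registers.
 */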
int vnic_dev_cmd_init(struct vnic_dev *vdev)
{
	int err;
	void __iomem *res;

	res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (res != NULL) {
		/* res is only a presence probe; free the malloc'ed copy */
		free(res, M_DEVBUF);
		err = vnic_dev_init_devcmd2(vdev);
		if (err)
			device_printf(dev_from_vnic_dev(vdev),
			    "DEVCMD2 init failed, using DEVCMD1\n");
		else
			return 0;
	}

	err = vnic_dev_init_devcmd1(vdev);

	return (err);
}