/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include "enic.h"
#include "vnic_dev.h"
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_nic.h"
#include "vnic_stats.h"

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

#define VNIC_MAX_FLOW_COUNTERS 2048

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

void vnic_register_cbacks(struct vnic_dev *vdev,
	void *(*alloc_consistent)(void *priv, size_t size,
	    bus_addr_t *dma_handle, struct iflib_dma_info *res, u8 *name),
	void (*free_consistent)(void *priv,
	    size_t size, void *vaddr,
	    bus_addr_t dma_handle, struct iflib_dma_info *res))
{
	vdev->alloc_consistent = alloc_consistent;
	vdev->free_consistent = free_consistent;
}

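/*
 * Discover vNIC resources by walking the resource table that firmware
 * exposes at the start of BAR0 (or the management-vNIC header, when
 * that is what is mapped there).  Entries are copied out one at a time
 * until the RES_TYPE_EOL sentinel is reached.
 */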
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct enic_softc *softc = vdev->softc;
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	int r_offset;
	u8 type;

	if (num_bars == 0)
		return (EINVAL);

	rh = malloc(sizeof(*rh), M_DEVBUF, M_NOWAIT | M_ZERO);
	mrh = malloc(sizeof(*mrh), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!rh || !mrh) {
		pr_err("vNIC BAR0 res hdr not mem-mapped\n");
		free(rh, M_DEVBUF);
		free(mrh, M_DEVBUF);
		return (EINVAL);
	}

	/* Check for mgmt vnic in addition to normal vnic */
	ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)rh, sizeof(*rh) / 4);
	ENIC_BUS_READ_REGION_4(softc, mem, 0, (void *)mrh, sizeof(*mrh) / 4);
	if ((rh->magic != VNIC_RES_MAGIC) ||
	    (rh->version != VNIC_RES_VERSION)) {
		if ((mrh->magic != MGMTVNIC_MAGIC) ||
		    mrh->version != MGMTVNIC_VERSION) {
			pr_err("vNIC BAR0 res magic/version error " \
				"exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				VNIC_RES_MAGIC, VNIC_RES_VERSION,
				MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				rh->magic, rh->version);
			free(rh, M_DEVBUF);
			free(mrh, M_DEVBUF);
			return (EINVAL);
		}
	}

	if (mrh->magic == MGMTVNIC_MAGIC)
		r_offset = sizeof(*mrh);
	else
		r_offset = sizeof(*rh);

	r = malloc(sizeof(*r), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!r) {
		free(rh, M_DEVBUF);
		free(mrh, M_DEVBUF);
		return (ENOMEM);
	}
	ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4);
	while ((type = r->type) != RES_TYPE_EOL) {
		u8 bar_num = r->bar;
		u32 bar_offset = r->bar_offset;
		u32 count = r->count;

		r_offset += sizeof(*r);

		if (bar_num >= num_bars) {
			/* Skip this entry, but fetch the next one so the
			 * loop cannot spin on the same stale descriptor.
			 */
			ENIC_BUS_READ_REGION_4(softc, mem, r_offset,
			    (void *)r, sizeof(*r) / 4);
			continue;
		}

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			break;
		default:
			ENIC_BUS_READ_REGION_4(softc, mem, r_offset,
			    (void *)r, sizeof(*r) / 4);
			continue;
		}

		vdev->res[type].count = count;
		bcopy(&softc->mem, &vdev->res[type].bar, sizeof(softc->mem));
		vdev->res[type].bar.offset = bar_offset;
		ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r,
		    sizeof(*r) / 4);
	}

	free(rh, M_DEVBUF);
	free(mrh, M_DEVBUF);
	free(r, M_DEVBUF);
	return 0;
}

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

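/*
 * Return a malloc'd copy of the resource descriptor for the given type,
 * or NULL if the resource is absent.  Instances of the per-queue
 * resources are laid out VNIC_RES_STRIDE (128) bytes apart, so the
 * copy's BAR offset is advanced to the index'th instance.
 */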
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	struct vnic_res *res;

	if (!vdev->res[type].bar.tag)
		return NULL;

	res = malloc(sizeof(*res), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (res == NULL)
		return NULL;
	bcopy(&vdev->res[type], res, sizeof(*res));

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		res->bar.offset += index * VNIC_RES_STRIDE;
		break;
	default:
		break;
	}

	return res;
}

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = VNIC_ALIGN(desc_count, count_align);

	ring->desc_size = VNIC_ALIGN(desc_size, desc_align);

	ring->size_unaligned = ring->desc_count * ring->desc_size +
		ring->base_align;

	return ring->size_unaligned;
}

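/*
 * Issue a command over the legacy devcmd register window: write the
 * argument registers, write the command register, then poll the status
 * register in 100us steps for up to "wait" iterations.  A status of all
 * ones means the PCIe device has gone away.
 */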
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_res __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	int err;

	status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return (ENODEV);
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return (EBUSY);
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		ENIC_BUS_WRITE_REGION_4(devcmd, DEVCMD_ARGS(0),
		    (void *)&vdev->args[0], VNIC_DEVCMD_NARGS * 2);
	}

	ENIC_BUS_WRITE_4(devcmd, DEVCMD_CMD, cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) {
		return 0;
	}

	for (delay = 0; delay < wait; delay++) {
		udelay(100);

		status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return (ENODEV);
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = -(int)ENIC_BUS_READ_8(devcmd,
				    DEVCMD_ARGS(0));
				if (cmd != CMD_CAPABILITY)
					pr_err("Devcmd %d failed " \
						"with error code %d\n",
						_CMD_N(cmd), err);
				return (err);
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				ENIC_BUS_READ_REGION_4(devcmd, bar,
				    DEVCMD_ARGS(0), (void *)&vdev->args[0],
				    VNIC_DEVCMD_NARGS * 2);
			}

			return 0;
		}
	}

	pr_err("Timed out devcmd %d\n", _CMD_N(cmd));
	return (ETIMEDOUT);
}

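/*
 * Issue a command over the devcmd2 work queue.  The descriptor is
 * handed to firmware by advancing the posted index; unless the command
 * is flagged no-wait, completion is detected by polling the paired
 * result ring, where firmware toggles a color bit on each pass over the
 * ring so a freshly written result can be told apart from a stale one.
 */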
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	u32 fetch_index, new_posted;
	int delay, err;
	u32 posted = dc2c->posted;

	fetch_index = ENIC_BUS_READ_4(dc2c->wq_ctrl, TX_FETCH_INDEX);
	if (fetch_index == 0xFFFFFFFF)
		return (ENODEV);

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;

	if (new_posted == fetch_index) {
		device_printf(dev_from_vnic_dev(vdev),
		    "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
		    _CMD_N(cmd), fetch_index, posted);
		return (EBUSY);
	}

	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];

	ENIC_BUS_WRITE_4(dc2c->wq_ctrl, TX_POSTED_INDEX, new_posted);
	dc2c->posted = new_posted;

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return (0);

	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		if (result->color == color) {
			if (result->error) {
				err = result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					device_printf(dev_from_vnic_dev(vdev),
					    "Error %d devcmd %d\n", err,
					    _CMD_N(cmd));
				return (err);
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ)
				for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
					vdev->args[i] = result->results[i];

			return 0;
		}
		udelay(100);
	}

	device_printf(dev_from_vnic_dev(vdev),
	    "devcmd %d timed out\n", _CMD_N(cmd));

	return (ETIMEDOUT);
}

static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *args, int nargs, int wait)
{
	u32 status;
	int err;

	/*
	 * Proxy command consumes 2 arguments. One for proxy index,
	 * the other is for command to be proxied
	 */
	if (nargs > VNIC_DEVCMD_NARGS - 2) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return (EINVAL);
	}
	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));

	err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
	if (err)
		return (err);

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
		return (err);
	}

	memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));

	return 0;
}

static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *args, int nargs, int wait)
{
	int err;

	if (nargs > VNIC_DEVCMD_NARGS) {
		pr_err("number of args %d exceeds the maximum\n", nargs);
		return (EINVAL);
	}
	memset(vdev->args, 0, sizeof(vdev->args));
	memcpy(vdev->args, args, nargs * sizeof(args[0]));

	err = vdev->devcmd_rtn(vdev, cmd, wait);

	memcpy(args, vdev->args, nargs * sizeof(args[0]));

	return (err);
}

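/*
 * Public two-argument devcmd entry point.  Depending on how the vdev
 * is configured, the command is issued directly or wrapped in a proxy
 * devcmd addressed by vNIC index or by PCI bus/device/function.
 */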
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u64 args[2];
	int err;

	args[0] = *a0;
	args[1] = *a1;
	memset(vdev->args, 0, sizeof(vdev->args));

	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_BY_BDF:
		err = vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, ARRAY_SIZE(args), wait);
		break;
	case PROXY_NONE:
	default:
		err = vnic_dev_cmd_no_proxy(vdev, cmd, args, 2, wait);
		break;
	}

	if (err == 0) {
		*a0 = args[0];
		*a1 = args[1];
	}

	return (err);
}

int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
		      u64 *args, int nargs, int wait)
{
	switch (vdev->proxy) {
	case PROXY_BY_INDEX:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
				args, nargs, wait);
	case PROXY_BY_BDF:
		return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
				args, nargs, wait);
	case PROXY_NONE:
	default:
		return vnic_dev_cmd_no_proxy(vdev, cmd, args, nargs, wait);
	}
}

static int vnic_dev_advanced_filters_cap(struct vnic_dev *vdev, u64 *args,
		int nargs)
{
	memset(args, 0, nargs * sizeof(*args));
	args[0] = CMD_ADD_ADV_FILTER;
	args[1] = FILTER_CAP_MODE_V1_FLAG;
	return vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, nargs, 1000);
}

int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
{
	u64 a0 = CMD_ADD_ADV_FILTER, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err)
		return 0;
	return (a1 >= (u32)FILTER_DPDK_1);
}

/*  Determine the "best" filtering mode the VIC is capable of.  Returns
 *  one of three values, or 0 on error:
 *	FILTER_DPDK_1 - advanced filters available
 *	FILTER_USNIC_IP_FLAG - advanced filters, but with the restriction
 *		that the IP layer must be explicitly specified, i.e. one
 *		cannot have a UDP filter that matches both IPv4 and IPv6.
 *	FILTER_IPV4_5TUPLE - fallback if neither of the two above is
 *		available; all other filter types are unavailable.
 *  The supported filter actions are returned in *filter_actions.
 */
int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
				 u8 *filter_actions)
{
	u64 args[4];
	int err;
	u32 max_level = 0;

	err = vnic_dev_advanced_filters_cap(vdev, args, 4);

	/* determine supported filter actions */
	*filter_actions = FILTER_ACTION_RQ_STEERING_FLAG; /* always available */
	if (args[2] == FILTER_CAP_MODE_V1)
		*filter_actions = args[3];

	if (err || ((args[0] == 1) && (args[1] == 0))) {
		/* Adv filter command not supported, or adv filters available
		 * but not enabled. Try the normal filter capability command.
		 */
		args[0] = CMD_ADD_FILTER;
		args[1] = 0;
		err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
		if (err)
			return (err);
		max_level = args[1];
		goto parse_max_level;
	} else if (args[2] == FILTER_CAP_MODE_V1) {
		/* parse filter capability mask in args[1] */
		if (args[1] & FILTER_DPDK_1_FLAG)
			*mode = FILTER_DPDK_1;
		else if (args[1] & FILTER_USNIC_IP_FLAG)
			*mode = FILTER_USNIC_IP;
		else if (args[1] & FILTER_IPV4_5TUPLE_FLAG)
			*mode = FILTER_IPV4_5TUPLE;
		return 0;
	}
	max_level = args[1];
parse_max_level:
	if (max_level >= (u32)FILTER_USNIC_IP)
		*mode = FILTER_USNIC_IP;
	else
		*mode = FILTER_IPV4_5TUPLE;
	return 0;
}

void vnic_dev_capable_udp_rss_weak(struct vnic_dev *vdev, bool *cfg_chk,
				   bool *weak)
{
	u64 a0 = CMD_NIC_CFG, a1 = 0;
	int wait = 1000;
	int err;

	*cfg_chk = false;
	*weak = false;
	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
	if (err == 0 && a0 != 0 && a1 != 0) {
		*cfg_chk = true;
		*weak = !!((a1 >> 32) & CMD_NIC_CFG_CAPF_UDP_WEAK);
	}
}

int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
	u64 a0 = (u32)cmd, a1 = 0;
	int wait = 1000;
	int err;

	err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);

	return !(err || a0);
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return (err);
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;
	int rc;

	if (!vdev->stats)
		return (ENOMEM);

	*stats = vdev->stats;
	a0 = vdev->stats_res.idi_paddr;
	a1 = sizeof(struct vnic_stats);

	bus_dmamap_sync(vdev->stats_res.idi_tag,
			vdev->stats_res.idi_map,
			BUS_DMASYNC_POSTREAD);
	rc = vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
	bus_dmamap_sync(vdev->stats_res.idi_tag,
			vdev->stats_res.idi_map,
			BUS_DMASYNC_PREREAD);
	return (rc);
}

/*
 * Configure counter DMA
 */
int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
			     u32 num_counters)
{
	u64 args[3];
	int wait = 1000;
	int err;

	if (num_counters > VNIC_MAX_FLOW_COUNTERS)
		return (ENOMEM);
	if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD ||
	    num_counters == 0))
		return (EINVAL);

	args[0] = num_counters;
	args[1] = vdev->flow_counters_res.idi_paddr;
	args[2] = period;
	bus_dmamap_sync(vdev->flow_counters_res.idi_tag,
			vdev->flow_counters_res.idi_map,
			BUS_DMASYNC_POSTREAD);
	err = vnic_dev_cmd_args(vdev, CMD_COUNTER_DMA_CONFIG, args, 3, wait);
	bus_dmamap_sync(vdev->flow_counters_res.idi_tag,
			vdev->flow_counters_res.idi_map,
			BUS_DMASYNC_PREREAD);

	/* record if DMAs need to be stopped on close */
	if (!err)
		vdev->flow_counters_dma_active = (num_counters != 0 &&
						  period != 0);

	return (err);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return (err);

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return (err);

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		pr_err("Can't set packet filter\n");

	return (err);
}

int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		pr_err("Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return (err);
}

int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		pr_err("Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);

	return (err);
}

int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}

void vnic_dev_set_reset_flag(struct vnic_dev *vdev, int state)
{
	vdev->in_reset = state;
}

static inline int vnic_dev_in_reset(struct vnic_dev *vdev)
{
	return vdev->in_reset;
}

int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, bus_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	bus_dmamap_sync(vdev->notify_res.idi_tag,
			vdev->notify_res.idi_map,
			BUS_DMASYNC_PREWRITE);
	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	bus_dmamap_sync(vdev->notify_res.idi_tag,
			vdev->notify_res.idi_map,
			BUS_DMASYNC_POSTWRITE);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = notify_addr;
		vdev->notify_pa = notify_pa;
	}

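	/* a1 packs the interrupt index into bits 32..47; the low 32 bits
	 * carry the size of the notify structure.
	 */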
	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev))
		vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}

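/*
 * Register the notify area with firmware, allocating the DMA buffer
 * the first time through.  An already-registered buffer is simply
 * re-registered, and allocation is skipped during reset.
 */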
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr = NULL;
	bus_addr_t notify_pa = 0;
	char name[NAME_MAX];
	static u32 instance;

	if (vdev->notify || vdev->notify_pa) {
		return vnic_dev_notify_setcmd(vdev, vdev->notify,
					      vdev->notify_pa, intr);
	}
	if (!vnic_dev_in_reset(vdev)) {
		snprintf((char *)name, sizeof(name),
			"vnic_notify-%u", instance++);
		iflib_dma_alloc(vdev->softc->ctx,
				     sizeof(struct vnic_devcmd_notify),
				     &vdev->notify_res, BUS_DMA_NOWAIT);
		notify_pa = vdev->notify_res.idi_paddr;
		notify_addr = vdev->notify_res.idi_vaddr;
	}

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}

int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	if (!vnic_dev_in_reset(vdev)) {
		vdev->notify = NULL;
		vdev->notify_pa = 0;
		vdev->notify_sz = 0;
	}

	return (err);
}

int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify && !vnic_dev_in_reset(vdev)) {
		iflib_dma_free(&vdev->notify_res);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}

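/*
 * Firmware updates the notify area by DMA at its own pace, so a single
 * read could observe a torn update.  Word 0 holds a checksum of the
 * remaining words; keep re-copying the structure until the checksum
 * matches and a consistent snapshot has been captured.
 */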
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		bus_dmamap_sync(vdev->notify_res.idi_tag,
				vdev->notify_res.idi_map,
				BUS_DMASYNC_PREREAD);
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		bus_dmamap_sync(vdev->notify_res.idi_tag,
				vdev->notify_res.idi_map,
				BUS_DMASYNC_POSTREAD);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return (1);
}

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}

void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}

u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}

u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;
	struct enic_softc *softc;

	softc = vdev->softc;

	snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++);
	iflib_dma_alloc(softc->ctx, sizeof(struct vnic_stats),
	    &vdev->stats_res, 0);
	vdev->stats = (struct vnic_stats *)vdev->stats_res.idi_vaddr;
	return (vdev->stats == NULL ? ENOMEM : 0);
}

/*
 * Initialize for up to VNIC_MAX_FLOW_COUNTERS
 */
int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev)
{
	char name[NAME_MAX];
	static u32 instance;
	struct enic_softc *softc;

	softc = vdev->softc;

	snprintf((char *)name, sizeof(name), "vnic_flow_ctrs-%u", instance++);
	iflib_dma_alloc(softc->ctx,
	    sizeof(struct vnic_counter_counts) * VNIC_MAX_FLOW_COUNTERS,
	    &vdev->flow_counters_res, 0);
	vdev->flow_counters =
	    (struct vnic_counter_counts *)vdev->flow_counters_res.idi_vaddr;
	vdev->flow_counters_dma_active = 0;
	return (vdev->flow_counters == NULL ? ENOMEM : 0);
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
    struct enic_bar_info *mem, unsigned int num_bars)
{
	if (vnic_dev_discover_res(vdev, NULL, num_bars))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	return NULL;
}

static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return (ENODEV);
	vdev->devcmd_rtn = _vnic_dev_cmd;

	return 0;
}

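/*
 * Bring up the devcmd2 path: allocate a work queue for command
 * descriptors and a ring for results, hand the result ring to firmware
 * with CMD_INITIALIZE_DEVCMD2 (issued through the new path itself), and
 * only then switch devcmd_rtn over to _vnic_dev_cmd2.
 */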
static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
	int err;
	unsigned int fetch_index;

	err = 0;

	if (vdev->devcmd2)
		return (0);

	vdev->devcmd2 = malloc(sizeof(*vdev->devcmd2), M_DEVBUF,
	    M_NOWAIT | M_ZERO);

	if (!vdev->devcmd2) {
		return (ENOMEM);
	}

	vdev->devcmd2->color = 1;
	vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;

	err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
	    DEVCMD2_DESC_SIZE);

	if (err) {
		goto err_free_devcmd2;
	}
	vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
	vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;

	fetch_index = ENIC_BUS_READ_4(vdev->devcmd2->wq.ctrl, TX_FETCH_INDEX);
	if (fetch_index == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		err = ENODEV;
		goto err_free_devcmd2;
	}

	enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
	    0);
	vdev->devcmd2->posted = fetch_index;
	vnic_wq_enable(&vdev->devcmd2->wq);

	err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
	    DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
	if (err)
		goto err_free_devcmd2;

	vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
	vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
	    VNIC_PADDR_TARGET;
	vdev->args[1] = DEVCMD2_RING_SIZE;

	err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
	if (err)
		goto err_free_devcmd2;

	vdev->devcmd_rtn = _vnic_dev_cmd2;

	return (err);

err_free_devcmd2:
	if (vdev->devcmd2->wq_ctrl)
		vnic_wq_free(&vdev->devcmd2->wq);
	if (vdev->devcmd2->result)
		vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	free(vdev->devcmd2, M_DEVBUF);
	vdev->devcmd2 = NULL;

	return (err);
}

/*
 *  vnic_dev_classifier: Add/Delete classifier entries
 *  @vdev: vdev of the device
 *  @cmd: CLSF_ADD for Add filter
 *        CLSF_DEL for Delete filter
 *  @entry: In case of ADD filter, the caller passes the RQ number in this
 *          variable.
 *          This function stores the filter_id returned by the
 *          firmware in the same variable before returning.
 *
 *          In case of DEL filter, the caller passes the RQ number. Return
 *          value is irrelevant.
 * @data: filter data
 * @action: action data
 */

int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
{
	u64 a0 = overlay;
	u64 a1 = config;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
}

int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
				 u16 vxlan_udp_port_number)
{
	u64 a1 = vxlan_udp_port_number;
	u64 a0 = overlay;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
}

int vnic_dev_capable_vxlan(struct vnic_dev *vdev)
{
	u64 a0 = VIC_FEATURE_VXLAN;
	u64 a1 = 0;
	int wait = 1000;
	int ret;

	ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
	/* 1 if the NIC can do VXLAN for both IPv4 and IPv6 with multiple WQs */
	return ret == 0 &&
		(a1 & (FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ)) ==
		(FEATURE_VXLAN_IPV6 | FEATURE_VXLAN_MULTI_WQ);
}

bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx)
{
	u64 a0 = 0;
	u64 a1 = 0;
	int wait = 1000;

	if (vnic_dev_cmd(vdev, CMD_COUNTER_ALLOC, &a0, &a1, wait))
		return false;
	*idx = (uint32_t)a0;
	return true;
}

bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx)
{
	u64 a0 = idx;
	u64 a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_COUNTER_FREE, &a0, &a1,
			    wait) == 0;
}

bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
			    bool reset, uint64_t *packets, uint64_t *bytes)
{
	u64 a0 = idx;
	u64 a1 = reset ? 1 : 0;
	int wait = 1000;

	if (reset) {
		/* query/reset returns updated counters */
		if (vnic_dev_cmd(vdev, CMD_COUNTER_QUERY, &a0, &a1, wait))
			return false;
		*packets = a0;
		*bytes = a1;
	} else {
		/* Get values DMA'd from the adapter */
		*packets = vdev->flow_counters[idx].vcc_packets;
		*bytes = vdev->flow_counters[idx].vcc_bytes;
	}
	return true;
}

device_t dev_from_vnic_dev(struct vnic_dev *vdev)
{
	return (vdev->softc->dev);
}

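/*
 * Select the devcmd transport at attach time: prefer the devcmd2 work
 * queue when its resource is present, and fall back to the legacy
 * devcmd register window if devcmd2 initialization fails.
 */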
int vnic_dev_cmd_init(struct vnic_dev *vdev)
{
	int err;
	void __iomem *res;

	res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
	if (res) {
		err = vnic_dev_init_devcmd2(vdev);
		if (err)
			device_printf(dev_from_vnic_dev(vdev),
			    "DEVCMD2 init failed, Using DEVCMD1\n");
		else
			return 0;
	}

	err = vnic_dev_init_devcmd1(vdev);

	return (err);
}