xref: /freebsd/sys/dev/liquidio/lio_ioctl.c (revision a2aef24aa3c8458e4036735dd6928b4ef77294e5)
/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

#include "lio_bsd.h"
#include "lio_common.h"
#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_network.h"
#include "lio_ctrl.h"
#include "cn23xx_pf_device.h"
#include "lio_image.h"
#include "lio_ioctl.h"
#include "lio_main.h"
#include "lio_rxtx.h"

static int	lio_set_rx_csum(struct ifnet *ifp, uint32_t data);
static int	lio_set_tso4(struct ifnet *ifp);
static int	lio_set_tso6(struct ifnet *ifp);
static int	lio_set_lro(struct ifnet *ifp);
static int	lio_change_mtu(struct ifnet *ifp, int new_mtu);
static int	lio_set_mcast_list(struct ifnet *ifp);
static inline enum lio_ifflags	lio_get_new_flags(struct ifnet *ifp);

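/*
 * Return true when 'addr' is a usable unicast MAC address: the
 * multicast bit in the first octet must be clear and the address must
 * not be all zeroes.
 */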
static inline bool
lio_is_valid_ether_addr(const uint8_t *addr)
{

	return (!(0x01 & addr[0]) && !((addr[0] + addr[1] + addr[2] + addr[3] +
					addr[4] + addr[5]) == 0x00));
}

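/*
 * Send a control packet carrying the interface's current flag state
 * (promiscuous, allmulti, multicast, broadcast) to the NIC core
 * application.
 */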
static int
lio_change_dev_flags(struct ifnet *ifp)
{
	struct lio_ctrl_pkt	nctrl;
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	int ret = 0;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
	nctrl.ncmd.s.param1 = lio_get_new_flags(ifp);
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(oct, &nctrl);
	if (ret)
		lio_dev_err(oct, "Failed to change flags ret %d\n", ret);

	return (ret);
}

/*
 * lio_ioctl : Called by the stack to configure the interface.
 *
 * Returns 0 on success, a positive errno value on failure.
 */
int
lio_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lio	*lio = if_getsoftc(ifp);
	struct ifreq	*ifrequest = (struct ifreq *)data;
	int	error = 0;

	switch (cmd) {
	case SIOCSIFADDR:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFADDR\n");
		if_setflagbits(ifp, IFF_UP, 0);
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFMTU\n");
		error = lio_change_mtu(ifp, ifrequest->ifr_mtu);
		break;
	case SIOCSIFFLAGS:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFFLAGS\n");
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ lio->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					error = lio_change_dev_flags(ifp);
			} else {
				if (!(atomic_load_acq_int(&lio->ifstate) &
				      LIO_IFSTATE_DETACH))
					lio_open(lio);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				lio_stop(ifp);
		}
		lio->if_flags = if_getflags(ifp);
		break;
	case SIOCADDMULTI:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCADDMULTI\n");
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			error = lio_set_mcast_list(ifp);
		break;
	case SIOCDELMULTI:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCDELMULTI\n");
		break;
	case SIOCSIFMEDIA:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFMEDIA\n");
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCGIFMEDIA\n");
		/* FALLTHROUGH */
	case SIOCGIFXMEDIA:
		lio_dev_dbg(lio->oct_dev, "ioctl: SIOCGIFXMEDIA\n");
		error = ifmedia_ioctl(ifp, ifrequest, &lio->ifmedia, cmd);
		break;
	case SIOCSIFCAP:
		{
			int	features = ifrequest->ifr_reqcap ^
					if_getcapenable(ifp);

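			/*
			 * Each bit set in 'features' marks a capability
			 * whose requested state differs from the current
			 * one, i.e. a capability to toggle below.
			 */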
			lio_dev_dbg(lio->oct_dev, "ioctl: SIOCSIFCAP (Set Capabilities)\n");

			if (!features)
				break;

			if (features & IFCAP_TXCSUM) {
				if_togglecapenable(ifp, IFCAP_TXCSUM);
				if (if_getcapenable(ifp) & IFCAP_TXCSUM)
					if_sethwassistbits(ifp, (CSUM_TCP |
								 CSUM_UDP |
								 CSUM_IP), 0);
				else
					if_sethwassistbits(ifp, 0,
							(CSUM_TCP | CSUM_UDP |
							 CSUM_IP));
			}
			if (features & IFCAP_TXCSUM_IPV6) {
				if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
				if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6)
					if_sethwassistbits(ifp, (CSUM_UDP_IPV6 |
							   CSUM_TCP_IPV6), 0);
				else
					if_sethwassistbits(ifp, 0,
							   (CSUM_UDP_IPV6 |
							    CSUM_TCP_IPV6));
			}
			if (features & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6))
				error |= lio_set_rx_csum(ifp, (features &
							       (IFCAP_RXCSUM |
							 IFCAP_RXCSUM_IPV6)));

			if (features & IFCAP_TSO4)
				error |= lio_set_tso4(ifp);

			if (features & IFCAP_TSO6)
				error |= lio_set_tso6(ifp);

			if (features & IFCAP_LRO)
				error |= lio_set_lro(ifp);

			if (features & IFCAP_VLAN_HWTAGGING)
				if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);

			if (features & IFCAP_VLAN_HWFILTER)
				if_togglecapenable(ifp, IFCAP_VLAN_HWFILTER);

			if (features & IFCAP_VLAN_HWTSO)
				if_togglecapenable(ifp, IFCAP_VLAN_HWTSO);

			VLAN_CAPABILITIES(ifp);
			break;
		}
	default:
		lio_dev_dbg(lio->oct_dev, "ioctl: UNKNOWN (0x%X)\n", (int)cmd);
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

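/*
 * Toggle IFCAP_TSO4 and keep the IPv4 TSO bit in if_hwassist in sync
 * with the new capability state.
 */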
static int
lio_set_tso4(struct ifnet *ifp)
{
	struct lio	*lio = if_getsoftc(ifp);

	if (if_getcapabilities(ifp) & IFCAP_TSO4) {
		if_togglecapenable(ifp, IFCAP_TSO4);
		if (if_getcapenable(ifp) & IFCAP_TSO4)
			if_sethwassistbits(ifp, CSUM_IP_TSO, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP_TSO);
	} else {
		lio_dev_info(lio->oct_dev, "TSO4 capability not supported\n");
		return (EINVAL);
	}

	return (0);
}

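/*
 * Toggle IFCAP_TSO6 and keep the IPv6 TSO bit in if_hwassist in sync
 * with the new capability state.
 */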
static int
lio_set_tso6(struct ifnet *ifp)
{
	struct lio	*lio = if_getsoftc(ifp);

	if (if_getcapabilities(ifp) & IFCAP_TSO6) {
		if_togglecapenable(ifp, IFCAP_TSO6);
		if (if_getcapenable(ifp) & IFCAP_TSO6)
			if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP6_TSO);
	} else {
		lio_dev_info(lio->oct_dev, "TSO6 capability not supported\n");
		return (EINVAL);
	}

	return (0);
}

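/*
 * Toggle the RX checksum offload capabilities.  If LRO is currently
 * enabled, it is disabled at the same time because LRO depends on RX
 * checksum offload.
 */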
static int
lio_set_rx_csum(struct ifnet *ifp, uint32_t data)
{
	struct lio	*lio = if_getsoftc(ifp);
	int	ret = 0;

	if (if_getcapabilities(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
		if_togglecapenable(ifp, (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6));

		if (data) {
			/* LRO requires RXCSUM */
			if ((if_getcapabilities(ifp) & IFCAP_LRO) &&
			    (if_getcapenable(ifp) & IFCAP_LRO)) {
				ret = lio_set_feature(ifp, LIO_CMD_LRO_DISABLE,
						      LIO_LROIPV4 |
						      LIO_LROIPV6);
				if_togglecapenable(ifp, IFCAP_LRO);
			}
		}
	} else {
		lio_dev_info(lio->oct_dev, "Rx checksum offload capability not supported\n");
		return (ENODEV);
	}

	return ((ret) ? EINVAL : 0);
}

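/*
 * Toggle IFCAP_LRO.  Enabling LRO requires both IPv4 and IPv6 RX
 * checksum offload to be enabled; the LRO feature in the firmware is
 * only switched when lio_hwlro is nonzero.
 */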
static int
lio_set_lro(struct ifnet *ifp)
{
	struct lio	*lio = if_getsoftc(ifp);
	int	ret = 0;

	if (!(if_getcapabilities(ifp) & IFCAP_LRO)) {
		lio_dev_info(lio->oct_dev, "LRO capability not supported\n");
		return (ENODEV);
	}

	if ((!(if_getcapenable(ifp) & IFCAP_LRO)) &&
	    (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
	    (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6)) {
		if_togglecapenable(ifp, IFCAP_LRO);

		if (lio_hwlro)
			ret = lio_set_feature(ifp, LIO_CMD_LRO_ENABLE, LIO_LROIPV4 |
					      LIO_LROIPV6);

	} else if (if_getcapenable(ifp) & IFCAP_LRO) {
		if_togglecapenable(ifp, IFCAP_LRO);

		if (lio_hwlro)
			ret = lio_set_feature(ifp, LIO_CMD_LRO_DISABLE, LIO_LROIPV4 |
					      LIO_LROIPV6);
	} else
		lio_dev_info(lio->oct_dev, "LRO requires RXCSUM\n");

	return ((ret) ? EINVAL : 0);
}

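/*
 * Completion callback for the MTU-change soft command.  Records the
 * outcome in the command context (1 on success, -1 on failure) so that
 * the thread sleeping in lio_change_mtu() can pick it up.
 */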
static void
lio_mtu_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
{
	struct lio_soft_command	*sc = buf;
	volatile int		*mtu_sc_ctx;

	mtu_sc_ctx = sc->ctxptr;

	if (status) {
		lio_dev_err(oct, "MTU update control command failed. Status: %llx\n",
			    LIO_CAST64(status));
		*mtu_sc_ctx = -1;
		/*
		 * This barrier is required to be sure that the
		 * response has been written fully.
		 */
		wmb();
		return;
	}

	*mtu_sc_ctx = 1;

	/*
	 * This barrier is required to be sure that the response has been
	 * written fully.
	 */
	wmb();
}

/*
 * lio_change_mtu - change the device MTU via a soft command to the NIC.
 * @param ifp network device
 * @param new_mtu requested MTU in bytes
 */
static int
lio_change_mtu(struct ifnet *ifp, int new_mtu)
{
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	struct lio_soft_command	*sc;
	union octeon_cmd	*ncmd;
	volatile int		*mtu_sc_ctx;
	int	retval = 0;

	if (lio->mtu == new_mtu)
		return (0);

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * LIO_MIN_MTU_SIZE bytes and LIO_MAX_MTU_SIZE bytes
	 */
	if ((new_mtu < LIO_MIN_MTU_SIZE) || (new_mtu > LIO_MAX_MTU_SIZE)) {
		lio_dev_err(oct, "Invalid MTU: %d\n", new_mtu);
		lio_dev_err(oct, "Valid range: %d to %d\n",
			    LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
		return (EINVAL);
	}

	sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16,
				    sizeof(*mtu_sc_ctx));
	if (sc == NULL)
		return (ENOMEM);

	ncmd = (union octeon_cmd *)sc->virtdptr;
	mtu_sc_ctx = sc->ctxptr;

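	/* 0: waiting, 1: firmware accepted the new MTU, -1: failure. */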
	*mtu_sc_ctx = 0;

	ncmd->cmd64 = 0;
	ncmd->s.cmd = LIO_CMD_CHANGE_MTU;
	ncmd->s.param1 = new_mtu;

	lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC,
				 LIO_OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = lio_mtu_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = lio_send_soft_command(oct, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_info(oct,
			     "Failed to send MTU update control message\n");
		retval = EBUSY;
		goto mtu_update_failed;
	}

	/*
	 * Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or the command timed out.
	 */
	lio_sleep_cond(oct, mtu_sc_ctx);

	if (*mtu_sc_ctx < 0) {
		retval = EBUSY;
		goto mtu_update_failed;
	}
	lio_dev_info(oct, "MTU Changed from %d to %d\n", if_getmtu(ifp),
		     new_mtu);
	if_setmtu(ifp, new_mtu);
	lio->mtu = new_mtu;
	retval = 0;	/*
			 * Reset retval so that the LIO_IQ_SEND_STOP case is
			 * also reported as success.
			 */

mtu_update_failed:
	lio_free_soft_command(oct, sc);

	return (retval);
}

/*
 * lio_set_mac - program a new MAC address into the NIC.
 * @param ifp network device
 * @param p new MAC address, in network byte order
 */
int
lio_set_mac(struct ifnet *ifp, uint8_t *p)
{
	struct lio_ctrl_pkt	nctrl;
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	int	ret = 0;

	if (!lio_is_valid_ether_addr(p))
		return (EADDRNOTAVAIL);

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((uint8_t *)&nctrl.udd[0] + 2, p, ETHER_ADDR_LEN);

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "MAC Address change failed\n");
		return (ENOMEM);
	}

	memcpy(((uint8_t *)&lio->linfo.hw_addr) + 2, p, ETHER_ADDR_LEN);

	return (0);
}

/*
 * \brief Generate a lio_ifflags mask from the ifp flags
 * @param ifp network device
 *
 * This routine converts the interface flags received from the OS into
 * the lio_ifflags mask understood by the firmware.
 */
static inline enum lio_ifflags
lio_get_new_flags(struct ifnet *ifp)
{
	enum lio_ifflags f = LIO_IFFLAG_UNICAST;

	if (if_getflags(ifp) & IFF_PROMISC)
		f |= LIO_IFFLAG_PROMISC;

	if (if_getflags(ifp) & IFF_ALLMULTI)
		f |= LIO_IFFLAG_ALLMULTI;

	if (if_getflags(ifp) & IFF_MULTICAST) {
		f |= LIO_IFFLAG_MULTICAST;

		/*
		 * Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (if_getamcount(ifp) > LIO_MAX_MULTICAST_ADDR)
			f |= LIO_IFFLAG_ALLMULTI;
	}
	if (if_getflags(ifp) & IFF_BROADCAST)
		f |= LIO_IFFLAG_BROADCAST;

	return (f);
}

/*
 * lio_set_mcast_list - send the interface's multicast address list and
 * flag state to the NIC core application.
 * @param ifp network device
 */
static int
lio_set_mcast_list(struct ifnet *ifp)
{
	struct lio		*lio = if_getsoftc(ifp);
	struct octeon_device	*oct = lio->oct_dev;
	struct lio_ctrl_pkt	nctrl;
	struct ifmultiaddr	*ifma;
	uint64_t		*mc;
	int	mc_count = 0;
	int	ret;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = lio_get_new_flags(ifp);
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];

	/* to protect access to if_multiaddrs */
	if_maddr_rlock(ifp);

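	/*
	 * Each multicast MAC is copied into bytes 2..7 of its own 64-bit
	 * udd word; the same offset-2 layout is used for the MAC address
	 * in lio_set_mac() above.
	 */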
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		*mc = 0;
		memcpy(((uint8_t *)mc) + 2,
		       LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		       ETHER_ADDR_LEN);
		/* no need to swap bytes */

		mc_count++;
		if (++mc > &nctrl.udd[LIO_MAX_MULTICAST_ADDR])
			break;
	}

	if_maddr_runlock(ifp);

	/*
	 * Apparently, any activity in this call from the kernel has to
	 * be atomic, so we do not wait for a response.
	 */
	nctrl.wait_time = 0;
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "Multicast list change failed in core (ret: 0x%x)\n",
			    ret);
	}

	return ((ret) ? EINVAL : 0);
}
554