/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Xen inter-domain backend - GLDv3 driver edition.
 *
 * A traditional GLDv3 driver used to communicate with a guest
 * domain.  This driver is typically plumbed underneath the IP stack
 * or a software ethernet bridge.
 */

#include "xnb.h"

#include <sys/sunddi.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/strsubr.h>
#include <sys/dlpi.h>
#include <sys/pattr.h>
#include <sys/mac_provider.h>
#include <sys/mac_ether.h>
#include <xen/sys/xendev.h>
#include <sys/note.h>

/* Required driver entry points for GLDv3 */
static int	xnbu_m_start(void *);
static void	xnbu_m_stop(void *);
static int	xnbu_m_set_mac_addr(void *, const uint8_t *);
static int	xnbu_m_set_multicast(void *, boolean_t, const uint8_t *);
static int	xnbu_m_set_promiscuous(void *, boolean_t);
static int	xnbu_m_stat(void *, uint_t, uint64_t *);
static boolean_t xnbu_m_getcapab(void *, mac_capab_t, void *);
static mblk_t	*xnbu_m_send(void *, mblk_t *);

typedef struct xnbu {
	mac_handle_t		u_mh;
	boolean_t		u_need_sched;
} xnbu_t;

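/*
 * GLDv3 callback table.  MC_GETCAPAB indicates that the optional
 * getcapab entry point is provided; unused optional entry points
 * are left as NULL.
 */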
static mac_callbacks_t xnbu_callbacks = {
	MC_GETCAPAB,
	xnbu_m_stat,
	xnbu_m_start,
	xnbu_m_stop,
	xnbu_m_set_promiscuous,
	xnbu_m_set_multicast,
	xnbu_m_set_mac_addr,
	xnbu_m_send,
	NULL,
	xnbu_m_getcapab
};

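/*
 * Packets received from the peer are passed to this flavour hook by
 * the xnb core.  Hand them to the MAC layer and, if the transmit path
 * had been throttled for lack of ring space, restart it.
 */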
static void
xnbu_to_host(xnb_t *xnbp, mblk_t *mp)
{
	xnbu_t *xnbup = xnbp->xnb_flavour_data;
	boolean_t sched = B_FALSE;

	ASSERT(mp != NULL);

	mac_rx(xnbup->u_mh, NULL, mp);

	mutex_enter(&xnbp->xnb_rx_lock);

	/*
	 * If a transmit attempt failed because we ran out of ring
	 * space and there is now some space, re-enable the transmit
	 * path.
	 */
	if (xnbup->u_need_sched &&
	    RING_HAS_UNCONSUMED_REQUESTS(&xnbp->xnb_rx_ring)) {
		sched = B_TRUE;
		xnbup->u_need_sched = B_FALSE;
	}

	mutex_exit(&xnbp->xnb_rx_lock);

	if (sched)
		mac_tx_update(xnbup->u_mh);
}

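/*
 * Convert the checksum flags (NETTXF_*) that accompany a packet from
 * the peer into the hardware checksum metadata expected by the host
 * protocol stack.
 */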
static mblk_t *
xnbu_cksum_from_peer(xnb_t *xnbp, mblk_t *mp, uint16_t flags)
{
	/*
	 * Take a conservative approach - if the checksum is blank
	 * then we fill it in.
	 *
	 * If the consumer of the packet is IP we would only need to
	 * fill it in when the data has not been validated, but we
	 * cannot know who will end up with the packet.
	 */

	if ((flags & NETTXF_csum_blank) != 0) {
		/*
		 * The checksum is blank.  We must fill it in here.
		 */
		mp = xnb_process_cksum_flags(xnbp, mp, 0);

		/*
		 * Because we calculated the checksum ourselves we
		 * know that it must be good, so we assert this.
		 */
		flags |= NETTXF_data_validated;
	}

	if ((flags & NETTXF_data_validated) != 0) {
		/*
		 * The checksum is asserted valid.
		 *
		 * The hardware checksum offload specification says
		 * that we must provide the actual checksum as well as
		 * an assertion that it is valid, but the protocol
		 * stack doesn't actually use it so we don't bother.
		 * If it were necessary we could grovel in the packet
		 * to find it.
		 */
		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
		    HCK_FULLCKSUM | HCK_FULLCKSUM_OK, KM_NOSLEEP);
	}

	return (mp);
}

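/*
 * Derive the checksum flags (NETRXF_*) to be passed to the peer from
 * the hardware checksum metadata attached to an outbound packet.
 */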
static uint16_t
xnbu_cksum_to_peer(xnb_t *xnbp, mblk_t *mp)
{
	_NOTE(ARGUNUSED(xnbp));
	uint16_t r = 0;
	uint32_t pflags;

	hcksum_retrieve(mp, NULL, NULL, NULL, NULL,
	    NULL, NULL, &pflags);

	/*
	 * If the protocol stack has requested checksum
	 * offload, inform the peer that we have not
	 * calculated the checksum.
	 */
	if ((pflags & HCK_FULLCKSUM) != 0)
		r |= NETRXF_csum_blank;

	return (r);
}

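/*
 * Flavour hook called as the connection to the peer is brought up:
 * report the link as up and kick the MAC layer so that any pending
 * transmits are retried.
 */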
static boolean_t
xnbu_start_connect(xnb_t *xnbp)
{
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	mac_link_update(xnbup->u_mh, LINK_STATE_UP);
	/*
	 * We are able to send packets now - bring them on.
	 */
	mac_tx_update(xnbup->u_mh);

	return (B_TRUE);
}

static boolean_t
xnbu_peer_connected(xnb_t *xnbp)
{
	_NOTE(ARGUNUSED(xnbp));

	return (B_TRUE);
}

static void
xnbu_peer_disconnected(xnb_t *xnbp)
{
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	mac_link_update(xnbup->u_mh, LINK_STATE_DOWN);
}

/*ARGSUSED*/
static boolean_t
xnbu_hotplug_connected(xnb_t *xnbp)
{
	return (B_TRUE);
}

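/*
 * GLDv3 transmit entry point.  Packets are copied across to the peer;
 * any mblks handed back by xnb_copy_to_peer() could not be sent (the
 * ring is full) and are returned to the MAC layer to be retried after
 * mac_tx_update() is called.
 */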
static mblk_t *
xnbu_m_send(void *arg, mblk_t *mp)
{
	xnb_t *xnbp = arg;
	xnbu_t *xnbup = xnbp->xnb_flavour_data;
	boolean_t sched = B_FALSE;

	mp = xnb_copy_to_peer(arg, mp);

	mutex_enter(&xnbp->xnb_rx_lock);
	/*
	 * If we consumed all of the mblk_t's offered, perhaps we need
	 * to indicate that we can accept more.  Otherwise we are full
	 * and need to wait for space.
	 */
	if (mp == NULL) {
		sched = xnbup->u_need_sched;
		xnbup->u_need_sched = B_FALSE;
	} else {
		xnbup->u_need_sched = B_TRUE;
	}
	mutex_exit(&xnbp->xnb_rx_lock);

	/*
	 * If a previous transmit attempt failed because the ring
	 * was full, try again now.
	 */
	if (sched)
		mac_tx_update(xnbup->u_mh);

	return (mp);
}

/*
 *  xnbu_m_set_mac_addr() -- set the MAC address of the (virtual) device.
 */
/* ARGSUSED */
static int
xnbu_m_set_mac_addr(void *arg, const uint8_t *macaddr)
{
	xnb_t *xnbp = arg;
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	bcopy(macaddr, xnbp->xnb_mac_addr, ETHERADDRL);
	mac_unicst_update(xnbup->u_mh, xnbp->xnb_mac_addr);

	return (0);
}

/*
 *  xnbu_m_set_multicast() -- set (enable) or disable a multicast address
 */
/*ARGSUSED*/
static int
xnbu_m_set_multicast(void *arg, boolean_t add, const uint8_t *mca)
{
	/*
	 * We always accept all packets from the peer, so nothing to
	 * do for enable or disable.
	 */
	return (0);
}

/*
 * xnbu_m_set_promiscuous() -- enable or disable promiscuous mode
 */
/* ARGSUSED */
static int
xnbu_m_set_promiscuous(void *arg, boolean_t on)
{
	/*
	 * We always pass along all packets from the peer, so there is
	 * nothing to do when promiscuous mode is toggled.
	 */
	return (0);
}

/*
 *  xnbu_m_start() -- start the device.  Nothing to do for this
 *  virtual device.
 */
/*ARGSUSED*/
static int
xnbu_m_start(void *arg)
{
	return (0);
}

/*
 * xnbu_m_stop() -- stop the device.  Again, nothing to do.
 */
/*ARGSUSED*/
static void
xnbu_m_stop(void *arg)
{
}

static int
xnbu_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	xnb_t *xnbp = arg;

	mutex_enter(&xnbp->xnb_tx_lock);
	mutex_enter(&xnbp->xnb_rx_lock);

#define	map_stat(q, r)				\
	case (MAC_STAT_##q):			\
		*val = xnbp->xnb_stat_##r;	\
		break

	switch (stat) {

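	/*
	 * The xnb core accounts for traffic from the peer's point of
	 * view, so the peer's output is our input and vice versa.
	 */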
	map_stat(IPACKETS, opackets);
	map_stat(OPACKETS, ipackets);
	map_stat(RBYTES, obytes);
	map_stat(OBYTES, rbytes);

	default:
		mutex_exit(&xnbp->xnb_rx_lock);
		mutex_exit(&xnbp->xnb_tx_lock);

		return (ENOTSUP);
	}

#undef map_stat

	mutex_exit(&xnbp->xnb_rx_lock);
	mutex_exit(&xnbp->xnb_tx_lock);

	return (0);
}

static boolean_t
xnbu_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	_NOTE(ARGUNUSED(arg));

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *capab = cap_data;

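		/*
		 * Advertise partial (pseudo-header) checksum offload:
		 * outbound packets may reach us with the checksum
		 * still to be completed.
		 */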
		*capab = HCKSUM_INET_PARTIAL;
		break;
	}
	default:
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * All packets are passed to the peer, so adding and removing
 * multicast addresses is meaningless.
 */
static boolean_t
xnbu_mcast_add(xnb_t *xnbp, ether_addr_t *addr)
{
	_NOTE(ARGUNUSED(xnbp, addr));

	return (B_TRUE);
}

static boolean_t
xnbu_mcast_del(xnb_t *xnbp, ether_addr_t *addr)
{
	_NOTE(ARGUNUSED(xnbp, addr));

	return (B_TRUE);
}

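/*
 * Attach the device: initialise the common xnb state via xnb_attach()
 * and then register the device with the MAC layer.
 */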
static int
xnbu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	static xnb_flavour_t flavour = {
		xnbu_to_host, xnbu_peer_connected, xnbu_peer_disconnected,
		xnbu_hotplug_connected, xnbu_start_connect,
		xnbu_cksum_from_peer, xnbu_cksum_to_peer,
		xnbu_mcast_add, xnbu_mcast_del,
	};
	xnbu_t *xnbup;
	xnb_t *xnbp;
	mac_register_t *mr;
	int err;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	xnbup = kmem_zalloc(sizeof (*xnbup), KM_SLEEP);

	if ((mr = mac_alloc(MAC_VERSION)) == NULL) {
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	if (xnb_attach(dip, &flavour, xnbup) != DDI_SUCCESS) {
		mac_free(mr);
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	xnbp = ddi_get_driver_private(dip);
	ASSERT(xnbp != NULL);

	mr->m_dip = dip;
	mr->m_driver = xnbp;

	/*
	 * Initialize pointers to device-specific functions which will
	 * be used by the generic layer.
	 */
	mr->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mr->m_src_addr = xnbp->xnb_mac_addr;
	mr->m_callbacks = &xnbu_callbacks;
	mr->m_min_sdu = 0;
	mr->m_max_sdu = XNBMAXPKT;
	/*
	 * xnbu is a virtual device with no associated physical device.
	 * Its margin is whatever room remains in a PAGESIZE buffer once
	 * the maximum SDU and the ethernet header have been accounted
	 * for.
	 */
	mr->m_margin = PAGESIZE - XNBMAXPKT - sizeof (struct ether_header);

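	/*
	 * Start with a dummy unicast address (all ones with the
	 * multicast bit cleared); it can be replaced later via
	 * xnbu_m_set_mac_addr().
	 */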
	(void) memset(xnbp->xnb_mac_addr, 0xff, ETHERADDRL);
	xnbp->xnb_mac_addr[0] &= 0xfe;
	xnbup->u_need_sched = B_FALSE;

	/*
	 * Register ourselves with the GLDv3 interface.
	 */
	err = mac_register(mr, &xnbup->u_mh);
	mac_free(mr);
	if (err != 0) {
		xnb_detach(dip);
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	mac_link_update(xnbup->u_mh, LINK_STATE_DOWN);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
xnbu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	xnb_t *xnbp = ddi_get_driver_private(dip);
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	ASSERT(xnbp != NULL);
	ASSERT(xnbup != NULL);

	mutex_enter(&xnbp->xnb_tx_lock);
	mutex_enter(&xnbp->xnb_rx_lock);

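	/*
	 * Refuse to detach while the device is not marked detachable,
	 * the peer is still connected or transmit buffers are still
	 * outstanding.
	 */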
	if (!xnbp->xnb_detachable || xnbp->xnb_connected ||
	    (xnbp->xnb_tx_buf_count > 0)) {
		mutex_exit(&xnbp->xnb_rx_lock);
		mutex_exit(&xnbp->xnb_tx_lock);

		return (DDI_FAILURE);
	}

	mutex_exit(&xnbp->xnb_rx_lock);
	mutex_exit(&xnbp->xnb_tx_lock);

	/*
	 * Attempt to unregister the mac.
	 */
	if ((xnbup->u_mh != NULL) && (mac_unregister(xnbup->u_mh) != 0))
		return (DDI_FAILURE);
	kmem_free(xnbup, sizeof (*xnbup));

	xnb_detach(dip);

	return (DDI_SUCCESS);
}

DDI_DEFINE_STREAM_OPS(ops, nulldev, nulldev, xnbu_attach, xnbu_detach,
    nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);

static struct modldrv modldrv = {
	&mod_driverops, "xnbu driver", &ops
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

int
_init(void)
{
	int i;

	mac_init_ops(&ops, "xnbu");

	i = mod_install(&modlinkage);
	if (i != DDI_SUCCESS)
		mac_fini_ops(&ops);

	return (i);
}

int
_fini(void)
{
	int i;

	i = mod_remove(&modlinkage);
	if (i == DDI_SUCCESS)
		mac_fini_ops(&ops);

	return (i);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}