xref: /titanic_52/usr/src/uts/common/xen/io/xnbu.c (revision 5bbb4db2c3f208d12bf0fd11769728f9e5ba66a2)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Xen inter-domain backend - GLDv3 driver edition.
 *
 * A traditional GLDv3 driver used to communicate with a guest
 * domain.  This driver is typically plumbed underneath the IP stack
 * or a software ethernet bridge.
 */

#include "xnb.h"

#include <sys/sunddi.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/strsubr.h>
#include <sys/dlpi.h>
#include <sys/pattr.h>
#include <sys/mac_provider.h>
#include <sys/mac_ether.h>
#include <xen/sys/xendev.h>

/* Required driver entry points for GLDv3 */
static int	xnbu_m_start(void *);
static void	xnbu_m_stop(void *);
static int	xnbu_m_set_mac_addr(void *, const uint8_t *);
static int	xnbu_m_set_multicast(void *, boolean_t, const uint8_t *);
static int	xnbu_m_set_promiscuous(void *, boolean_t);
static int	xnbu_m_stat(void *, uint_t, uint64_t *);
static boolean_t xnbu_m_getcapab(void *, mac_capab_t, void *);
static mblk_t	*xnbu_m_send(void *, mblk_t *);

typedef struct xnbu {
	mac_handle_t		u_mh;		/* MAC handle from mac_register() */
	boolean_t		u_need_sched;	/* transmit path stalled awaiting ring space */
} xnbu_t;

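/*
 * GLDv3 callback vector.  MC_GETCAPAB indicates that the optional
 * mc_getcapab entry point is supplied.
 */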
static mac_callbacks_t xnb_callbacks = {
	MC_GETCAPAB,
	xnbu_m_stat,
	xnbu_m_start,
	xnbu_m_stop,
	xnbu_m_set_promiscuous,
	xnbu_m_set_multicast,
	xnbu_m_set_mac_addr,
	xnbu_m_send,
	NULL,
	xnbu_m_getcapab
};

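/*
 * xnbu_to_host() -- flavour hook called by the common xnb code to pass
 * packets that arrived from the peer domain up to the host's network
 * stack.  If the transmit path was stalled waiting for ring space,
 * restart it once space becomes available.
 */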
static void
xnbu_to_host(xnb_t *xnbp, mblk_t *mp)
{
	xnbu_t *xnbup = xnbp->xnb_flavour_data;
	boolean_t sched = B_FALSE;

	ASSERT(mp != NULL);

	mac_rx(xnbup->u_mh, NULL, mp);

	mutex_enter(&xnbp->xnb_rx_lock);

	/*
	 * If a transmit attempt failed because we ran out of ring
	 * space and there is now some space, re-enable the transmit
	 * path.
	 */
	if (xnbup->u_need_sched &&
	    RING_HAS_UNCONSUMED_REQUESTS(&xnbp->xnb_rx_ring)) {
		sched = B_TRUE;
		xnbup->u_need_sched = B_FALSE;
	}

	mutex_exit(&xnbp->xnb_rx_lock);

	if (sched)
		mac_tx_update(xnbup->u_mh);
}

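/*
 * xnbu_cksum_from_peer() -- flavour hook used to translate the checksum
 * flags supplied by the peer into the hardware checksum offload state
 * expected by the host's protocol stack.
 */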
static mblk_t *
xnbu_cksum_from_peer(xnb_t *xnbp, mblk_t *mp, uint16_t flags)
{
	/*
	 * Take a conservative approach - if the checksum is blank
	 * then we fill it in.
	 *
	 * If the consumer of the packet is IP then we might actually
	 * only need to fill it in if the data is not validated, but
	 * how do we know who might end up with the packet?
	 */

	if ((flags & NETTXF_csum_blank) != 0) {
		/*
		 * The checksum is blank.  We must fill it in here.
		 */
		mp = xnb_process_cksum_flags(xnbp, mp, 0);

		/*
		 * Because we calculated the checksum ourselves we
		 * know that it must be good, so we assert this.
		 */
		flags |= NETTXF_data_validated;
	}

	if ((flags & NETTXF_data_validated) != 0) {
		/*
		 * The checksum is asserted valid.
		 *
		 * The hardware checksum offload specification says
		 * that we must provide the actual checksum as well as
		 * an assertion that it is valid, but the protocol
		 * stack doesn't actually use it so we don't bother.
		 * If it were necessary we could grovel in the packet
		 * to find it.
		 */
		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
		    HCK_FULLCKSUM | HCK_FULLCKSUM_OK, KM_NOSLEEP);
	}

	return (mp);
}

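/*
 * xnbu_cksum_to_peer() -- flavour hook used to derive the checksum
 * flags that accompany a packet sent to the peer domain.
 */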
static uint16_t
xnbu_cksum_to_peer(xnb_t *xnbp, mblk_t *mp)
{
	uint16_t r = 0;

	if (xnbp->xnb_cksum_offload) {
		uint32_t pflags;

		hcksum_retrieve(mp, NULL, NULL, NULL, NULL,
		    NULL, NULL, &pflags);

		/*
		 * If the protocol stack has requested checksum
		 * offload, inform the peer that we have not
		 * calculated the checksum.
		 */
		if ((pflags & HCK_FULLCKSUM) != 0)
			r |= NETRXF_csum_blank;
	}

	return (r);
}

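/*
 * xnbu_connected() and xnbu_disconnected() -- flavour hooks called by
 * the common xnb code when the connection to the peer domain is
 * established or torn down; reflect the change in the link state.
 */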
static void
xnbu_connected(xnb_t *xnbp)
{
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	mac_link_update(xnbup->u_mh, LINK_STATE_UP);
	/*
	 * We are able to send packets now - bring them on.
	 */
	mac_tx_update(xnbup->u_mh);
}

static void
xnbu_disconnected(xnb_t *xnbp)
{
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	mac_link_update(xnbup->u_mh, LINK_STATE_DOWN);
}

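/*
 * xnbu_hotplug() -- flavour hook called on hotplug events; no
 * flavour-specific processing is required, so simply report success.
 */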
/*ARGSUSED*/
static boolean_t
xnbu_hotplug(xnb_t *xnbp)
{
	return (B_TRUE);
}

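/*
 * xnbu_m_send() -- GLDv3 transmit entry point.  Copy the chain to the
 * peer domain; any mblk_t's that could not be consumed (because the
 * ring is full) are returned so that the MAC layer will retry after
 * mac_tx_update() is called.
 */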
static mblk_t *
xnbu_m_send(void *arg, mblk_t *mp)
{
	xnb_t *xnbp = arg;
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	mp = xnb_copy_to_peer(arg, mp);

	/* XXPV dme: playing with need_sched without txlock? */

	/*
	 * If we consumed all of the mblk_t's offered, perhaps we need
	 * to indicate that we can accept more.  Otherwise we are full
	 * and need to wait for space.
	 */
	if (mp == NULL) {
		/*
		 * If a previous transmit attempt failed because the ring
		 * was full, try again now.
		 */
		if (xnbup->u_need_sched) {
			xnbup->u_need_sched = B_FALSE;
			mac_tx_update(xnbup->u_mh);
		}
	} else {
		xnbup->u_need_sched = B_TRUE;
	}

	return (mp);
}

/*
 *  xnbu_m_set_mac_addr() -- set the unicast MAC address of the
 *  (virtual) device.
 */
/* ARGSUSED */
static int
xnbu_m_set_mac_addr(void *arg, const uint8_t *macaddr)
{
	xnb_t *xnbp = arg;
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	bcopy(macaddr, xnbp->xnb_mac_addr, ETHERADDRL);
	mac_unicst_update(xnbup->u_mh, xnbp->xnb_mac_addr);

	return (0);
}

/*
 *  xnbu_m_set_multicast() -- set (enable) or disable a multicast address
 */
/*ARGSUSED*/
static int
xnbu_m_set_multicast(void *arg, boolean_t add, const uint8_t *mca)
{
	/*
	 * We always accept all packets from the peer, so nothing to
	 * do for enable or disable.
	 */
	return (0);
}


/*
 * xnbu_m_set_promiscuous() -- enable or disable promiscuous mode
 */
/* ARGSUSED */
static int
xnbu_m_set_promiscuous(void *arg, boolean_t on)
{
	/*
	 * We always accept all packets from the peer, so nothing to
	 * do for enable or disable.
	 */
	return (0);
}

/*
 *  xnbu_m_start() -- start the device; there is no hardware to
 *  program, so nothing to do.
 */
/*ARGSUSED*/
static int
xnbu_m_start(void *arg)
{
	return (0);
}

/*
 * xnbu_m_stop() -- stop the device; again, nothing to do.
 */
/*ARGSUSED*/
static void
xnbu_m_stop(void *arg)
{
}

static int
xnbu_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	xnb_t *xnbp = arg;

	mutex_enter(&xnbp->xnb_tx_lock);
	mutex_enter(&xnbp->xnb_rx_lock);

#define	map_stat(q, r)				\
	case (MAC_STAT_##q):			\
		*val = xnbp->xnb_stat_##r;	\
		break

	switch (stat) {

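	/*
	 * N.B. the mapping below is crossed (IPACKETS -> opackets,
	 * etc.); the common xnb counters appear to be kept from the
	 * peer domain's point of view, so what the peer transmits is
	 * what this device receives, and vice versa.
	 */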
	map_stat(IPACKETS, opackets);
	map_stat(OPACKETS, ipackets);
	map_stat(RBYTES, obytes);
	map_stat(OBYTES, rbytes);

	default:
		mutex_exit(&xnbp->xnb_rx_lock);
		mutex_exit(&xnbp->xnb_tx_lock);

		return (ENOTSUP);
	}

#undef map_stat

	mutex_exit(&xnbp->xnb_rx_lock);
	mutex_exit(&xnbp->xnb_tx_lock);

	return (0);
}

static boolean_t
xnbu_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	xnb_t *xnbp = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *capab = cap_data;

		if (xnbp->xnb_cksum_offload)
			*capab = HCKSUM_INET_PARTIAL;
		else
			*capab = 0;
		break;
	}
	default:
		return (B_FALSE);
	}

	return (B_TRUE);
}

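/*
 * xnbu_attach() -- attach to the common xnb code and register this
 * instance with the GLDv3 framework.
 */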
static int
xnbu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	static xnb_flavour_t flavour = {
		xnbu_to_host, xnbu_connected, xnbu_disconnected, xnbu_hotplug,
		xnbu_cksum_from_peer, xnbu_cksum_to_peer,
	};
	xnbu_t *xnbup;
	xnb_t *xnbp;
	mac_register_t *mr;
	int err;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	xnbup = kmem_zalloc(sizeof (*xnbup), KM_SLEEP);

	if ((mr = mac_alloc(MAC_VERSION)) == NULL) {
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	if (xnb_attach(dip, &flavour, xnbup) != DDI_SUCCESS) {
		mac_free(mr);
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	xnbp = ddi_get_driver_private(dip);
	ASSERT(xnbp != NULL);

	mr->m_dip = dip;
	mr->m_driver = xnbp;

	/*
	 *  Initialize pointers to the device-specific functions that
	 *  will be used by the generic MAC layer.
	 */
	mr->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mr->m_src_addr = xnbp->xnb_mac_addr;
	mr->m_callbacks = &xnb_callbacks;
	mr->m_min_sdu = 0;
	mr->m_max_sdu = XNBMAXPKT;
	/*
	 * xnbu is a virtual device, not associated with any physical
	 * device.  The largest frame it can handle is PAGESIZE bytes,
	 * so the margin is whatever remains once the maximum SDU and
	 * the ethernet header have been accounted for.
	 */
	mr->m_margin = PAGESIZE - XNBMAXPKT - sizeof (struct ether_header);

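	/*
	 * Start with a placeholder all-ones address with the group bit
	 * cleared; a usable address can be set later via
	 * xnbu_m_set_mac_addr().
	 */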
	(void) memset(xnbp->xnb_mac_addr, 0xff, ETHERADDRL);
	xnbp->xnb_mac_addr[0] &= 0xfe;
	xnbup->u_need_sched = B_FALSE;

	/*
	 * Register ourselves with the GLDv3 interface.
	 */
	err = mac_register(mr, &xnbup->u_mh);
	mac_free(mr);
	if (err != 0) {
		xnb_detach(dip);
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	mac_link_update(xnbup->u_mh, LINK_STATE_DOWN);

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
int
xnbu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	xnb_t *xnbp = ddi_get_driver_private(dip);
	xnbu_t *xnbup = xnbp->xnb_flavour_data;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	ASSERT(xnbp != NULL);
	ASSERT(xnbup != NULL);

	mutex_enter(&xnbp->xnb_tx_lock);
	mutex_enter(&xnbp->xnb_rx_lock);

	if (!xnbp->xnb_detachable || xnbp->xnb_connected ||
	    (xnbp->xnb_tx_buf_count > 0)) {
		mutex_exit(&xnbp->xnb_rx_lock);
		mutex_exit(&xnbp->xnb_tx_lock);

		return (DDI_FAILURE);
	}

	mutex_exit(&xnbp->xnb_rx_lock);
	mutex_exit(&xnbp->xnb_tx_lock);

	/*
	 * Attempt to unregister the mac.
	 */
	if ((xnbup->u_mh != NULL) && (mac_unregister(xnbup->u_mh) != 0))
		return (DDI_FAILURE);
	kmem_free(xnbup, sizeof (*xnbup));

	xnb_detach(dip);

	return (DDI_SUCCESS);
}

DDI_DEFINE_STREAM_OPS(ops, nulldev, nulldev, xnbu_attach, xnbu_detach,
    nodev, NULL, D_MP, NULL, ddi_quiesce_not_supported);

static struct modldrv modldrv = {
	&mod_driverops, "xnbu driver", &ops
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

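/*
 * Loadable module linkage.  mac_init_ops() must be called before
 * mod_install() so that the GLDv3 framework can fill in the dev_ops
 * structure, and mac_fini_ops() must be called if installation fails
 * or when the module is removed.
 */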
int
_init(void)
{
	int i;

	mac_init_ops(&ops, "xnbu");

	i = mod_install(&modlinkage);
	if (i != DDI_SUCCESS)
		mac_fini_ops(&ops);

	return (i);
}

int
_fini(void)
{
	int i;

	i = mod_remove(&modlinkage);
	if (i == DDI_SUCCESS)
		mac_fini_ops(&ops);

	return (i);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}