/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Xen inter-domain backend - GLDv3 driver edition.
 *
 * A traditional GLDv3 driver used to communicate with a guest
 * domain.  This driver is typically plumbed underneath the IP stack
 * or a software ethernet bridge.
 */

#include "xnb.h"

#include <sys/sunddi.h>
#include <sys/conf.h>
#include <sys/modctl.h>
#include <sys/strsubr.h>
#include <sys/dlpi.h>
#include <sys/pattr.h>
#include <sys/mac.h>
#include <sys/mac_ether.h>
#include <xen/sys/xendev.h>

/* GLDv3 driver entry points */
static int	xnbu_m_start(void *);
static void	xnbu_m_stop(void *);
static int	xnbu_m_set_mac_addr(void *, const uint8_t *);
static int	xnbu_m_set_multicast(void *, boolean_t, const uint8_t *);
static int	xnbu_m_set_promiscuous(void *, boolean_t);
static int	xnbu_m_stat(void *, uint_t, uint64_t *);
static void	xnbu_m_blank(void *, time_t, uint_t);
static void	xnbu_m_resources(void *);
static boolean_t xnbu_m_getcapab(void *, mac_capab_t, void *);
static mblk_t	*xnbu_m_send(void *, mblk_t *);

typedef struct xnbu {
	mac_handle_t		u_mh;
	mac_resource_handle_t	u_rx_handle;
	boolean_t		u_need_sched;
} xnbu_t;

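/*
 * GLDv3 callback table.  The MC_RESOURCES and MC_GETCAPAB flags
 * advertise the optional xnbu_m_resources() and xnbu_m_getcapab()
 * entry points; the remaining (ioctl) slot is unused.
 */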
static mac_callbacks_t xnb_callbacks = {
	MC_RESOURCES | MC_GETCAPAB,
	xnbu_m_stat,
	xnbu_m_start,
	xnbu_m_stop,
	xnbu_m_set_promiscuous,
	xnbu_m_set_multicast,
	xnbu_m_set_mac_addr,
	xnbu_m_send,
	xnbu_m_resources,
	NULL,
	xnbu_m_getcapab
};

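/*
 * Deliver packets received from the peer domain to the MAC layer and,
 * if the transmit path stalled waiting for ring space, restart it now
 * that requests have been consumed from the ring.
 */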
static void
xnbu_to_host(xnb_t *xnbp, mblk_t *mp)
{
	xnbu_t *xnbup = xnbp->x_flavour_data;
	boolean_t sched = B_FALSE;

	ASSERT(mp != NULL);

	mac_rx(xnbup->u_mh, xnbup->u_rx_handle, mp);

	mutex_enter(&xnbp->x_tx_lock);

	/*
	 * If a transmit attempt failed because we ran out of ring
	 * space and there is now some space, re-enable the transmit
	 * path.
	 */
	if (xnbup->u_need_sched &&
	    RING_HAS_UNCONSUMED_REQUESTS(&xnbp->x_rx_ring)) {
		sched = B_TRUE;
		xnbup->u_need_sched = B_FALSE;
	}

	mutex_exit(&xnbp->x_tx_lock);

	if (sched)
		mac_tx_update(xnbup->u_mh);
}

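/*
 * Map the checksum flags supplied by the peer (NETTXF_*) onto the
 * hardware checksum offload metadata expected by the host stack.
 */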
static mblk_t *
xnbu_cksum_from_peer(xnb_t *xnbp, mblk_t *mp, uint16_t flags)
{
	/*
	 * Take a conservative approach - if the checksum is blank
	 * then we fill it in.
	 *
	 * If the consumer of the packet is IP then we might actually
	 * only need to fill it in if the data is not validated, but
	 * how do we know who might end up with the packet?
	 */

	if ((flags & NETTXF_csum_blank) != 0) {
		/*
		 * The checksum is blank.  We must fill it in here.
		 */
		mp = xnb_process_cksum_flags(xnbp, mp, 0);

		/*
		 * Because we calculated the checksum ourselves we
		 * know that it must be good, so we assert this.
		 */
		flags |= NETTXF_data_validated;
	}

	if ((flags & NETTXF_data_validated) != 0) {
		/*
		 * The checksum is asserted valid.
		 *
		 * The hardware checksum offload specification says
		 * that we must provide the actual checksum as well as
		 * an assertion that it is valid, but the protocol
		 * stack doesn't actually use it so we don't bother.
		 * If it was necessary we could grovel in the packet
		 * to find it.
		 */
		(void) hcksum_assoc(mp, NULL, NULL, 0, 0, 0, 0,
		    HCK_FULLCKSUM | HCK_FULLCKSUM_OK, KM_NOSLEEP);
	}

	return (mp);
}

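/*
 * Derive the checksum flags (NETRXF_*) to pass to the peer for a
 * packet being transmitted from the host side.
 */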
static uint16_t
xnbu_cksum_to_peer(xnb_t *xnbp, mblk_t *mp)
{
	uint16_t r = 0;

	if (xnbp->x_cksum_offload) {
		uint32_t pflags;

		hcksum_retrieve(mp, NULL, NULL, NULL, NULL,
		    NULL, NULL, &pflags);

		/*
		 * If the protocol stack has requested checksum
		 * offload, inform the peer that we have not
		 * calculated the checksum.
		 */
		if ((pflags & HCK_FULLCKSUM) != 0)
			r |= NETRXF_csum_blank;
	}

	return (r);
}

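/*
 * Called when the connection to the peer domain is established.
 */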
static void
xnbu_connected(xnb_t *xnbp)
{
	xnbu_t *xnbup = xnbp->x_flavour_data;

	mac_link_update(xnbup->u_mh, LINK_STATE_UP);
	/*
	 * We are able to send packets now - bring them on.
	 */
	mac_tx_update(xnbup->u_mh);
}

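/*
 * Called when the connection to the peer domain is torn down.
 */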
static void
xnbu_disconnected(xnb_t *xnbp)
{
	xnbu_t *xnbup = xnbp->x_flavour_data;

	mac_link_update(xnbup->u_mh, LINK_STATE_DOWN);
}

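/*
 * Nothing hotplug-specific is required for this flavour.
 */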
/*ARGSUSED*/
static boolean_t
xnbu_hotplug(xnb_t *xnbp)
{
	return (B_TRUE);
}

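/*
 * GLDv3 transmit entry point: hand packets to the peer via the shared
 * ring.  Any mblk chain returned here could not be sent; the MAC layer
 * will retry it once mac_tx_update() is called.
 */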
static mblk_t *
xnbu_m_send(void *arg, mblk_t *mp)
{
	xnb_t *xnbp = arg;
	xnbu_t *xnbup = xnbp->x_flavour_data;

	mp = xnb_to_peer(arg, mp);

	/* XXPV dme: playing with need_sched without txlock? */

	/*
	 * If we consumed all of the mblk_t's offered, perhaps we need
	 * to indicate that we can accept more.  Otherwise we are full
	 * and need to wait for space.
	 */
	if (mp == NULL) {
		/*
		 * If a previous transmit attempt failed because the ring
		 * was full, try again now.
		 */
		if (xnbup->u_need_sched) {
			xnbup->u_need_sched = B_FALSE;
			mac_tx_update(xnbup->u_mh);
		}
	} else {
		xnbup->u_need_sched = B_TRUE;
	}

	return (mp);
}

/*
 *  xnbu_m_set_mac_addr() -- set the MAC address of the virtual device.
 */
/* ARGSUSED */
static int
xnbu_m_set_mac_addr(void *arg, const uint8_t *macaddr)
{
	xnb_t *xnbp = arg;
	xnbu_t *xnbup = xnbp->x_flavour_data;

	bcopy(macaddr, xnbp->x_mac_addr, ETHERADDRL);
	mac_unicst_update(xnbup->u_mh, xnbp->x_mac_addr);

	return (0);
}

/*
 *  xnbu_m_set_multicast() -- set (enable) or disable a multicast address
 */
/*ARGSUSED*/
static int
xnbu_m_set_multicast(void *arg, boolean_t add, const uint8_t *mca)
{
	/*
	 * We always accept all packets from the peer, so nothing to
	 * do for enable or disable.
	 */
	return (0);
}


/*
 * xnbu_m_set_promiscuous() -- enable or disable promiscuous mode.
 */
/* ARGSUSED */
static int
xnbu_m_set_promiscuous(void *arg, boolean_t on)
{
	/*
	 * We always accept all packets from the peer, so nothing to
	 * do for enable or disable.
	 */
	return (0);
}

/*
 *  xnbu_m_start() -- start the device.  There is nothing for the
 *  virtual device to do, so this is a no-op.
 */
/*ARGSUSED*/
static int
xnbu_m_start(void *arg)
{
	return (0);
}

/*
 * xnbu_m_stop() -- stop the device.  Again, a no-op for the virtual
 * device.
 */
/*ARGSUSED*/
static void
xnbu_m_stop(void *arg)
{
}

static int
xnbu_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	xnb_t *xnbp = arg;

	mutex_enter(&xnbp->x_tx_lock);
	mutex_enter(&xnbp->x_rx_lock);

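/*
 * map_stat() expands to a switch case that copies the corresponding
 * xnb_t statistic into *val.
 */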
#define	map_stat(q, r)				\
	case (MAC_STAT_##q):			\
		*val = xnbp->x_stat_##r;	\
		break

	switch (stat) {

	map_stat(IPACKETS, ipackets);
	map_stat(OPACKETS, opackets);
	map_stat(RBYTES, rbytes);
	map_stat(OBYTES, obytes);

	default:
		mutex_exit(&xnbp->x_rx_lock);
		mutex_exit(&xnbp->x_tx_lock);

		return (ENOTSUP);
	}

#undef map_stat

	mutex_exit(&xnbp->x_rx_lock);
	mutex_exit(&xnbp->x_tx_lock);

	return (0);
}

/*ARGSUSED*/
static void
xnbu_m_blank(void *arg, time_t ticks, uint_t count)
{
	/*
	 * XXPV dme: blanking is not currently implemented.
	 */
}

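/*
 * Describe our single receive resource (a FIFO with interrupt
 * blanking parameters) to the MAC layer.
 */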
static void
xnbu_m_resources(void *arg)
{
	xnb_t *xnbp = arg;
	xnbu_t *xnbup = xnbp->x_flavour_data;
	mac_rx_fifo_t mrf;

	mrf.mrf_type = MAC_RX_FIFO;
	mrf.mrf_blank = xnbu_m_blank;
	mrf.mrf_arg = (void *)xnbp;
	mrf.mrf_normal_blank_time = 128; /* XXPV dme: see xnbu_m_blank() */
	mrf.mrf_normal_pkt_count = 8;    /* XXPV dme: see xnbu_m_blank() */

	xnbup->u_rx_handle = mac_resource_add(xnbup->u_mh,
	    (mac_resource_t *)&mrf);
}

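/*
 * Report device capabilities: full IPv4 hardware checksum offload when
 * checksum offload has been negotiated with the peer, and polling.
 */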
static boolean_t
xnbu_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	xnb_t *xnbp = arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *capab = cap_data;

		if (xnbp->x_cksum_offload)
			*capab = HCKSUM_INET_FULL_V4;
		else
			*capab = 0;
		break;
	}

	case MAC_CAPAB_POLL:
		/* Just return B_TRUE. */
		break;

	default:
		return (B_FALSE);
	}

	return (B_TRUE);
}

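/*
 * Attach the flavour-specific state and register with the GLDv3
 * framework.  The generic backend state is set up by xnb_attach().
 */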
static int
xnbu_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	static xnb_flavour_t flavour = {
		xnbu_to_host, xnbu_connected, xnbu_disconnected, xnbu_hotplug,
		xnbu_cksum_from_peer, xnbu_cksum_to_peer,
	};
	xnbu_t *xnbup;
	xnb_t *xnbp;
	mac_register_t *mr;
	int err;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	xnbup = kmem_zalloc(sizeof (*xnbup), KM_SLEEP);

	if ((mr = mac_alloc(MAC_VERSION)) == NULL) {
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	if (xnb_attach(dip, &flavour, xnbup) != DDI_SUCCESS) {
		mac_free(mr);
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	xnbp = ddi_get_driver_private(dip);
	ASSERT(xnbp != NULL);

	mr->m_dip = dip;
	mr->m_driver = xnbp;

	/*
	 * Initialize the remaining fields of the mac_register_t,
	 * including the callbacks used by the generic MAC layer.
	 */
	mr->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	mr->m_src_addr = xnbp->x_mac_addr;
	mr->m_callbacks = &xnb_callbacks;
	mr->m_min_sdu = 0;
	mr->m_max_sdu = XNBMAXPKT;

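	/*
	 * Use an all-ones placeholder MAC address, with the multicast
	 * (group) bit of the first octet cleared so that it remains a
	 * valid unicast address.
	 */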
	(void) memset(xnbp->x_mac_addr, 0xff, ETHERADDRL);
	xnbp->x_mac_addr[0] &= 0xfe;
	xnbup->u_need_sched = B_FALSE;

	/*
	 * Register ourselves with the GLDv3 interface.
	 */
	err = mac_register(mr, &xnbup->u_mh);
	mac_free(mr);
	if (err != 0) {
		xnb_detach(dip);
		kmem_free(xnbup, sizeof (*xnbup));
		return (DDI_FAILURE);
	}

	mac_link_update(xnbup->u_mh, LINK_STATE_DOWN);

	return (DDI_SUCCESS);
}

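/*
 * Detach is refused unless the device is marked detachable, is
 * disconnected from its peer and has no receive buffers outstanding.
 */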
/*ARGSUSED*/
int
xnbu_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	xnb_t *xnbp = ddi_get_driver_private(dip);
	xnbu_t *xnbup = xnbp->x_flavour_data;

	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return (DDI_SUCCESS);
	default:
		return (DDI_FAILURE);
	}

	ASSERT(xnbp != NULL);
	ASSERT(xnbup != NULL);

	mutex_enter(&xnbp->x_tx_lock);
	mutex_enter(&xnbp->x_rx_lock);

	if (!xnbp->x_detachable || xnbp->x_connected ||
	    (xnbp->x_rx_buf_count > 0)) {
		mutex_exit(&xnbp->x_rx_lock);
		mutex_exit(&xnbp->x_tx_lock);

		return (DDI_FAILURE);
	}

	mutex_exit(&xnbp->x_rx_lock);
	mutex_exit(&xnbp->x_tx_lock);

	/*
	 * Attempt to unregister the mac.
	 */
	if ((xnbup->u_mh != NULL) && (mac_unregister(xnbup->u_mh) != 0))
		return (DDI_FAILURE);
	kmem_free(xnbup, sizeof (*xnbup));

	xnb_detach(dip);

	return (DDI_SUCCESS);
}

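/*
 * Module linkage.  mac_init_ops() in _init() fills in the dev_ops
 * entries required by the MAC framework; mac_fini_ops() undoes this.
 */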
DDI_DEFINE_STREAM_OPS(ops, nulldev, nulldev, xnbu_attach, xnbu_detach,
    nodev, NULL, D_MP, NULL);

static struct modldrv modldrv = {
	&mod_driverops, "xnbu driver %I%", &ops
};

static struct modlinkage modlinkage = {
	MODREV_1, &modldrv, NULL
};

int
_init(void)
{
	int i;

	mac_init_ops(&ops, "xnbu");

	i = mod_install(&modlinkage);
	if (i != DDI_SUCCESS)
		mac_fini_ops(&ops);

	return (i);
}

int
_fini(void)
{
	int i;

	i = mod_remove(&modlinkage);
	if (i == DDI_SUCCESS)
		mac_fini_ops(&ops);

	return (i);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}