xref: /titanic_44/usr/src/uts/common/io/dld/dld_proto.c (revision 9df12a23948bd40cbe37ce88d84e272c3894e675)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Data-Link Driver
30  */
31 
32 #include <sys/types.h>
33 #include <sys/debug.h>
34 #include <sys/sysmacros.h>
35 #include <sys/stream.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/strsun.h>
39 #include <sys/cpuvar.h>
40 #include <sys/dlpi.h>
41 #include <netinet/in.h>
42 #include <sys/sdt.h>
43 #include <sys/strsubr.h>
44 #include <sys/vlan.h>
45 #include <sys/mac.h>
46 #include <sys/dls.h>
47 #include <sys/dld.h>
48 #include <sys/dld_impl.h>
49 #include <sys/dls_soft_ring.h>
50 
51 typedef boolean_t proto_reqfunc_t(dld_str_t *, union DL_primitives *, mblk_t *);
52 
53 static proto_reqfunc_t proto_info_req, proto_attach_req, proto_detach_req,
54     proto_bind_req, proto_unbind_req, proto_promiscon_req, proto_promiscoff_req,
55     proto_enabmulti_req, proto_disabmulti_req, proto_physaddr_req,
56     proto_setphysaddr_req, proto_udqos_req, proto_req, proto_capability_req,
57     proto_notify_req, proto_unitdata_req, proto_passive_req;
58 
59 static void proto_poll_disable(dld_str_t *);
60 static boolean_t proto_poll_enable(dld_str_t *, dl_capab_dls_t *);
61 static boolean_t proto_capability_advertise(dld_str_t *, mblk_t *);
62 
63 static task_func_t proto_process_unbind_req, proto_process_detach_req;
64 
65 static void proto_soft_ring_disable(dld_str_t *);
66 static boolean_t proto_soft_ring_enable(dld_str_t *, dl_capab_dls_t *);
67 static boolean_t proto_capability_advertise(dld_str_t *, mblk_t *);
68 static void proto_change_soft_ring_fanout(dld_str_t *, int);
69 
70 #define	DL_ACK_PENDING(state) \
71 	((state) == DL_ATTACH_PENDING || \
72 	(state) == DL_DETACH_PENDING || \
73 	(state) == DL_BIND_PENDING || \
74 	(state) == DL_UNBIND_PENDING)
75 
76 /*
77  * Process a DLPI protocol message.
78  * The primitives DL_BIND_REQ, DL_ENABMULTI_REQ, DL_PROMISCON_REQ,
79  * DL_SET_PHYS_ADDR_REQ put the data link below our dld_str_t into an
80  * 'active' state. The primitive DL_PASSIVE_REQ marks our dld_str_t
81  * as 'passive' and forbids it from being subsequently made 'active'
82  * by the above primitives.
83  */
84 void
85 dld_proto(dld_str_t *dsp, mblk_t *mp)
86 {
87 	union DL_primitives	*udlp;
88 	t_uscalar_t		prim;
89 
90 	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
91 		freemsg(mp);
92 		return;
93 	}
94 
95 	udlp = (union DL_primitives *)mp->b_rptr;
96 	prim = udlp->dl_primitive;
97 
98 	switch (prim) {
99 	case DL_INFO_REQ:
100 		(void) proto_info_req(dsp, udlp, mp);
101 		break;
102 	case DL_BIND_REQ:
103 		(void) proto_bind_req(dsp, udlp, mp);
104 		break;
105 	case DL_UNBIND_REQ:
106 		(void) proto_unbind_req(dsp, udlp, mp);
107 		break;
108 	case DL_UNITDATA_REQ:
109 		(void) proto_unitdata_req(dsp, udlp, mp);
110 		break;
111 	case DL_UDQOS_REQ:
112 		(void) proto_udqos_req(dsp, udlp, mp);
113 		break;
114 	case DL_ATTACH_REQ:
115 		(void) proto_attach_req(dsp, udlp, mp);
116 		break;
117 	case DL_DETACH_REQ:
118 		(void) proto_detach_req(dsp, udlp, mp);
119 		break;
120 	case DL_ENABMULTI_REQ:
121 		(void) proto_enabmulti_req(dsp, udlp, mp);
122 		break;
123 	case DL_DISABMULTI_REQ:
124 		(void) proto_disabmulti_req(dsp, udlp, mp);
125 		break;
126 	case DL_PROMISCON_REQ:
127 		(void) proto_promiscon_req(dsp, udlp, mp);
128 		break;
129 	case DL_PROMISCOFF_REQ:
130 		(void) proto_promiscoff_req(dsp, udlp, mp);
131 		break;
132 	case DL_PHYS_ADDR_REQ:
133 		(void) proto_physaddr_req(dsp, udlp, mp);
134 		break;
135 	case DL_SET_PHYS_ADDR_REQ:
136 		(void) proto_setphysaddr_req(dsp, udlp, mp);
137 		break;
138 	case DL_NOTIFY_REQ:
139 		(void) proto_notify_req(dsp, udlp, mp);
140 		break;
141 	case DL_CAPABILITY_REQ:
142 		(void) proto_capability_req(dsp, udlp, mp);
143 		break;
144 	case DL_PASSIVE_REQ:
145 		(void) proto_passive_req(dsp, udlp, mp);
146 		break;
147 	default:
148 		(void) proto_req(dsp, udlp, mp);
149 		break;
150 	}
151 }
152 
153 /*
154  * Finish any pending operations.
155  * Requests that need to be processed asynchronously will be handled
156  * by a separate thread. After this function returns, other threads
157  * will be allowed to enter dld; they will not be able to do anything
158  * until ds_dlstate transitions to a non-pending state.
159  */
160 void
161 dld_finish_pending_ops(dld_str_t *dsp)
162 {
163 	task_func_t *op = NULL;
164 
165 	ASSERT(MUTEX_HELD(&dsp->ds_thr_lock));
166 	ASSERT(dsp->ds_thr == 0);
167 
168 	op = dsp->ds_pending_op;
169 	dsp->ds_pending_op = NULL;
170 	mutex_exit(&dsp->ds_thr_lock);
171 	if (op != NULL)
172 		(void) taskq_dispatch(system_taskq, op, dsp, TQ_SLEEP);
173 }
174 
175 #define	NEG(x)	-(x)
176 
177 typedef struct dl_info_ack_wrapper {
178 	dl_info_ack_t		dl_info;
179 	uint8_t			dl_addr[MAXADDRLEN + sizeof (uint16_t)];
180 	uint8_t			dl_brdcst_addr[MAXADDRLEN];
181 	dl_qos_cl_range1_t	dl_qos_range1;
182 	dl_qos_cl_sel1_t	dl_qos_sel1;
183 } dl_info_ack_wrapper_t;
184 
185 /*
186  * DL_INFO_REQ
187  */
188 /*ARGSUSED*/
189 static boolean_t
190 proto_info_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
191 {
192 	dl_info_ack_wrapper_t	*dlwp;
193 	dl_info_ack_t		*dlp;
194 	dl_qos_cl_sel1_t	*selp;
195 	dl_qos_cl_range1_t	*rangep;
196 	uint8_t			*addr;
197 	uint8_t			*brdcst_addr;
198 	uint_t			addr_length;
199 	uint_t			sap_length;
200 	mac_info_t		minfo;
201 	mac_info_t		*minfop;
202 	queue_t			*q = dsp->ds_wq;
203 
204 	/*
205 	 * Swap the request message for one large enough to contain the
206 	 * wrapper structure defined above.
207 	 */
208 	if ((mp = mexchange(q, mp, sizeof (dl_info_ack_wrapper_t),
209 	    M_PCPROTO, 0)) == NULL)
210 		return (B_FALSE);
211 
212 	rw_enter(&dsp->ds_lock, RW_READER);
213 
214 	bzero(mp->b_rptr, sizeof (dl_info_ack_wrapper_t));
215 	dlwp = (dl_info_ack_wrapper_t *)mp->b_rptr;
216 
217 	dlp = &(dlwp->dl_info);
218 	ASSERT(dlp == (dl_info_ack_t *)mp->b_rptr);
219 
220 	dlp->dl_primitive = DL_INFO_ACK;
221 
222 	/*
223 	 * Set up the sub-structure pointers.
224 	 */
225 	addr = dlwp->dl_addr;
226 	brdcst_addr = dlwp->dl_brdcst_addr;
227 	rangep = &(dlwp->dl_qos_range1);
228 	selp = &(dlwp->dl_qos_sel1);
229 
230 	/*
231 	 * This driver supports only version 2 connectionless DLPI provider
232 	 * nodes.
233 	 */
234 	dlp->dl_service_mode = DL_CLDLS;
235 	dlp->dl_version = DL_VERSION_2;
236 
237 	/*
238 	 * Set the style of the provider
239 	 */
240 	dlp->dl_provider_style = dsp->ds_style;
241 	ASSERT(dlp->dl_provider_style == DL_STYLE1 ||
242 	    dlp->dl_provider_style == DL_STYLE2);
243 
244 	/*
245 	 * Set the current DLPI state.
246 	 */
247 	dlp->dl_current_state = dsp->ds_dlstate;
248 
249 	/*
250 	 * Gratuitously set the media type. This is to deal with modules
251 	 * that assume the media type is known prior to DL_ATTACH_REQ
252 	 * being completed.
253 	 */
254 	dlp->dl_mac_type = DL_ETHER;
255 
256 	/*
257 	 * If the stream is not at least attached we try to retrieve the
258 	 * mac_info using mac_info_get()
259 	 */
260 	if (dsp->ds_dlstate == DL_UNATTACHED ||
261 	    dsp->ds_dlstate == DL_ATTACH_PENDING ||
262 	    dsp->ds_dlstate == DL_DETACH_PENDING) {
263 		if (!mac_info_get(ddi_major_to_name(dsp->ds_major), &minfo)) {
264 			/*
265 			 * Cannot find mac_info. giving up.
266 			 */
267 			goto done;
268 		}
269 		minfop = &minfo;
270 	} else {
271 		minfop = (mac_info_t *)dsp->ds_mip;
272 	}
273 
274 	/*
275 	 * Set the media type (properly this time).
276 	 */
277 	dlp->dl_mac_type = minfop->mi_media;
278 
279 	/*
280 	 * Set the DLSAP length. We only support 16 bit values and they
281 	 * appear after the MAC address portion of DLSAP addresses.
282 	 */
283 	sap_length = sizeof (uint16_t);
284 	dlp->dl_sap_length = NEG(sap_length);
285 
286 	/*
287 	 * Set the minimum and maximum payload sizes.
288 	 */
289 	dlp->dl_min_sdu = minfop->mi_sdu_min;
290 	dlp->dl_max_sdu = minfop->mi_sdu_max;
291 
292 	addr_length = minfop->mi_addr_length;
293 	ASSERT(addr_length != 0);
294 
295 	/*
296 	 * Copy in the media broadcast address.
297 	 */
298 	dlp->dl_brdcst_addr_offset = (uintptr_t)brdcst_addr - (uintptr_t)dlp;
299 	bcopy(minfop->mi_brdcst_addr, brdcst_addr, addr_length);
300 	dlp->dl_brdcst_addr_length = addr_length;
301 
302 	/*
303 	 * We only support QoS information for VLAN interfaces.
304 	 */
305 	if (dsp->ds_vid != VLAN_ID_NONE) {
306 		dlp->dl_qos_range_offset = (uintptr_t)rangep - (uintptr_t)dlp;
307 		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
308 
309 		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
310 		rangep->dl_trans_delay.dl_target_value = DL_UNKNOWN;
311 		rangep->dl_trans_delay.dl_accept_value = DL_UNKNOWN;
312 		rangep->dl_protection.dl_min = DL_UNKNOWN;
313 		rangep->dl_protection.dl_max = DL_UNKNOWN;
314 		rangep->dl_residual_error = DL_UNKNOWN;
315 
316 		/*
317 		 * Specify the supported range of priorities.
318 		 */
319 		rangep->dl_priority.dl_min = 0;
320 		rangep->dl_priority.dl_max = (1 << VLAN_PRI_SIZE) - 1;
321 
322 		dlp->dl_qos_offset = (uintptr_t)selp - (uintptr_t)dlp;
323 		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
324 
325 		selp->dl_qos_type = DL_QOS_CL_SEL1;
326 		selp->dl_trans_delay = DL_UNKNOWN;
327 		selp->dl_protection = DL_UNKNOWN;
328 		selp->dl_residual_error = DL_UNKNOWN;
329 
330 		/*
331 		 * Specify the current priority (which can be changed by
332 		 * the DL_UDQOS_REQ primitive).
333 		 */
334 		selp->dl_priority = dsp->ds_pri;
335 	} else {
336 		/*
337 		 * Shorten the buffer to lose the unused QoS information
338 		 * structures.
339 		 */
340 		mp->b_wptr = (uint8_t *)rangep;
341 	}
342 
343 	dlp->dl_addr_length = addr_length + sizeof (uint16_t);
344 	if (dsp->ds_dlstate == DL_IDLE) {
345 		/*
346 		 * The stream is bound. Therefore we can formulate a valid
347 		 * DLSAP address.
348 		 */
349 		dlp->dl_addr_offset = (uintptr_t)addr - (uintptr_t)dlp;
350 		bcopy(dsp->ds_curr_addr, addr, addr_length);
351 		*(uint16_t *)(addr + addr_length) = dsp->ds_sap;
352 	}
353 
354 done:
355 	ASSERT(IMPLY(dlp->dl_qos_offset != 0, dlp->dl_qos_length != 0));
356 	ASSERT(IMPLY(dlp->dl_qos_range_offset != 0,
357 	    dlp->dl_qos_range_length != 0));
358 	ASSERT(IMPLY(dlp->dl_addr_offset != 0, dlp->dl_addr_length != 0));
359 	ASSERT(IMPLY(dlp->dl_brdcst_addr_offset != 0,
360 	    dlp->dl_brdcst_addr_length != 0));
361 
362 	rw_exit(&dsp->ds_lock);
363 
364 	qreply(q, mp);
365 	return (B_TRUE);
366 }
367 
368 /*
369  * DL_ATTACH_REQ
370  */
371 static boolean_t
372 proto_attach_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
373 {
374 	dl_attach_req_t	*dlp = (dl_attach_req_t *)udlp;
375 	int		err = 0;
376 	t_uscalar_t	dl_err;
377 	queue_t		*q = dsp->ds_wq;
378 
379 	rw_enter(&dsp->ds_lock, RW_WRITER);
380 
381 	if (MBLKL(mp) < sizeof (dl_attach_req_t) ||
382 	    dlp->dl_ppa < 0 || dsp->ds_style == DL_STYLE1) {
383 		dl_err = DL_BADPRIM;
384 		goto failed;
385 	}
386 
387 	if (dsp->ds_dlstate != DL_UNATTACHED) {
388 		dl_err = DL_OUTSTATE;
389 		goto failed;
390 	}
391 
392 	dsp->ds_dlstate = DL_ATTACH_PENDING;
393 
394 	err = dld_str_attach(dsp, dlp->dl_ppa);
395 	if (err != 0) {
396 		switch (err) {
397 		case ENOENT:
398 			dl_err = DL_BADPPA;
399 			err = 0;
400 			break;
401 		default:
402 			dl_err = DL_SYSERR;
403 			break;
404 		}
405 		dsp->ds_dlstate = DL_UNATTACHED;
406 		goto failed;
407 	}
408 	ASSERT(dsp->ds_dlstate == DL_UNBOUND);
409 	rw_exit(&dsp->ds_lock);
410 
411 	dlokack(q, mp, DL_ATTACH_REQ);
412 	return (B_TRUE);
413 failed:
414 	rw_exit(&dsp->ds_lock);
415 	dlerrorack(q, mp, DL_ATTACH_REQ, dl_err, (t_uscalar_t)err);
416 	return (B_FALSE);
417 }
418 
419 /*
420  * DL_DETACH_REQ
421  */
422 static void
423 proto_process_detach_req(void *arg)
424 {
425 	dld_str_t	*dsp = arg;
426 	mblk_t		*mp;
427 
428 	/*
429 	 * We don't need to hold locks because no other thread
430 	 * would manipulate dsp while it is in a PENDING state.
431 	 */
432 	ASSERT(dsp->ds_pending_req != NULL);
433 	ASSERT(dsp->ds_dlstate == DL_DETACH_PENDING);
434 
435 	mp = dsp->ds_pending_req;
436 	dsp->ds_pending_req = NULL;
437 	dld_str_detach(dsp);
438 	dlokack(dsp->ds_wq, mp, DL_DETACH_REQ);
439 
440 	DLD_WAKEUP(dsp);
441 }
442 
443 /*ARGSUSED*/
444 static boolean_t
445 proto_detach_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
446 {
447 	queue_t		*q = dsp->ds_wq;
448 	t_uscalar_t	dl_err;
449 
450 	rw_enter(&dsp->ds_lock, RW_WRITER);
451 
452 	if (MBLKL(mp) < sizeof (dl_detach_req_t)) {
453 		dl_err = DL_BADPRIM;
454 		goto failed;
455 	}
456 
457 	if (dsp->ds_dlstate != DL_UNBOUND) {
458 		dl_err = DL_OUTSTATE;
459 		goto failed;
460 	}
461 
462 	if (dsp->ds_style == DL_STYLE1) {
463 		dl_err = DL_BADPRIM;
464 		goto failed;
465 	}
466 
467 	dsp->ds_dlstate = DL_DETACH_PENDING;
468 
469 	/*
470 	 * Complete the detach when the driver is single-threaded.
471 	 */
472 	mutex_enter(&dsp->ds_thr_lock);
473 	ASSERT(dsp->ds_pending_req == NULL);
474 	dsp->ds_pending_req = mp;
475 	dsp->ds_pending_op = proto_process_detach_req;
476 	dsp->ds_pending_cnt++;
477 	mutex_exit(&dsp->ds_thr_lock);
478 	rw_exit(&dsp->ds_lock);
479 
480 	return (B_TRUE);
481 failed:
482 	rw_exit(&dsp->ds_lock);
483 	dlerrorack(q, mp, DL_DETACH_REQ, dl_err, 0);
484 	return (B_FALSE);
485 }
486 
487 /*
488  * DL_BIND_REQ
489  */
490 static boolean_t
491 proto_bind_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
492 {
493 	dl_bind_req_t	*dlp = (dl_bind_req_t *)udlp;
494 	int		err = 0;
495 	uint8_t		addr[MAXADDRLEN];
496 	uint_t		addr_length;
497 	t_uscalar_t	dl_err;
498 	t_scalar_t	sap;
499 	queue_t		*q = dsp->ds_wq;
500 
501 	rw_enter(&dsp->ds_lock, RW_WRITER);
502 
503 	if (MBLKL(mp) < sizeof (dl_bind_req_t)) {
504 		dl_err = DL_BADPRIM;
505 		goto failed;
506 	}
507 
508 	if (dlp->dl_xidtest_flg != 0) {
509 		dl_err = DL_NOAUTO;
510 		goto failed;
511 	}
512 
513 	if (dlp->dl_service_mode != DL_CLDLS) {
514 		dl_err = DL_UNSUPPORTED;
515 		goto failed;
516 	}
517 
518 	if (dsp->ds_dlstate != DL_UNBOUND) {
519 		dl_err = DL_OUTSTATE;
520 		goto failed;
521 	}
522 
523 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
524 	    !dls_active_set(dsp->ds_dc)) {
525 		dl_err = DL_SYSERR;
526 		err = EBUSY;
527 		goto failed;
528 	}
529 
530 	dsp->ds_dlstate = DL_BIND_PENDING;
531 	/*
532 	 * Set the receive callback.
533 	 */
534 	dls_rx_set(dsp->ds_dc, (dsp->ds_mode == DLD_RAW) ?
535 	    dld_str_rx_raw : dld_str_rx_unitdata, dsp);
536 
537 	/*
538 	 * Bind the channel such that it can receive packets.
539 	 */
540 	sap = dsp->ds_sap = dlp->dl_sap;
541 	err = dls_bind(dsp->ds_dc, dlp->dl_sap);
542 	if (err != 0) {
543 		switch (err) {
544 		case EINVAL:
545 			dl_err = DL_BADADDR;
546 			err = 0;
547 			break;
548 		default:
549 			dl_err = DL_SYSERR;
550 			break;
551 		}
552 		dsp->ds_dlstate = DL_UNBOUND;
553 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
554 			dls_active_clear(dsp->ds_dc);
555 
556 		goto failed;
557 	}
558 
559 	/*
560 	 * Copy in MAC address.
561 	 */
562 	addr_length = dsp->ds_mip->mi_addr_length;
563 	bcopy(dsp->ds_curr_addr, addr, addr_length);
564 
565 	/*
566 	 * Copy in the DLSAP.
567 	 */
568 	*(uint16_t *)(addr + addr_length) = dsp->ds_sap;
569 	addr_length += sizeof (uint16_t);
570 
571 	dsp->ds_dlstate = DL_IDLE;
572 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
573 		dsp->ds_passivestate = DLD_ACTIVE;
574 
575 	rw_exit(&dsp->ds_lock);
576 
577 	dlbindack(q, mp, sap, (void *)addr, addr_length, 0, 0);
578 	return (B_TRUE);
579 failed:
580 	rw_exit(&dsp->ds_lock);
581 	dlerrorack(q, mp, DL_BIND_REQ, dl_err, (t_uscalar_t)err);
582 	return (B_FALSE);
583 }
584 
585 /*
586  * DL_UNBIND_REQ
587  */
588 /*ARGSUSED*/
589 static void
590 proto_process_unbind_req(void *arg)
591 {
592 	dld_str_t	*dsp = arg;
593 	mblk_t		*mp;
594 
595 	/*
596 	 * We don't need to hold locks because no other thread
597 	 * would manipulate dsp while it is in a PENDING state.
598 	 */
599 	ASSERT(dsp->ds_pending_req != NULL);
600 	ASSERT(dsp->ds_dlstate == DL_UNBIND_PENDING);
601 
602 	/*
603 	 * Flush any remaining packets scheduled for transmission.
604 	 */
605 	dld_tx_flush(dsp);
606 
607 	/*
608 	 * Unbind the channel to stop packets being received.
609 	 */
610 	dls_unbind(dsp->ds_dc);
611 
612 	/*
613 	 * Disable polling mode, if it is enabled.
614 	 */
615 	proto_poll_disable(dsp);
616 
617 	/*
618 	 * Clear the receive callback.
619 	 */
620 	dls_rx_set(dsp->ds_dc, NULL, NULL);
621 
622 	/*
623 	 * Set the mode back to the default (unitdata).
624 	 */
625 	dsp->ds_mode = DLD_UNITDATA;
626 
627 	/*
628 	 * If soft rings were enabled, the workers
629 	 * should be quiesced. We cannot check for
630 	 * ds_soft_ring flag because
631 	 * proto_soft_ring_disable() called from
632 	 * proto_capability_req() would have reset it.
633 	 */
634 	if (dls_soft_ring_workers(dsp->ds_dc))
635 		dls_soft_ring_disable(dsp->ds_dc);
636 
637 	mp = dsp->ds_pending_req;
638 	dsp->ds_pending_req = NULL;
639 	dsp->ds_dlstate = DL_UNBOUND;
640 	dlokack(dsp->ds_wq, mp, DL_UNBIND_REQ);
641 
642 	DLD_WAKEUP(dsp);
643 }
644 
645 /*ARGSUSED*/
646 static boolean_t
647 proto_unbind_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
648 {
649 	queue_t		*q = dsp->ds_wq;
650 	t_uscalar_t	dl_err;
651 
652 	rw_enter(&dsp->ds_lock, RW_WRITER);
653 
654 	if (MBLKL(mp) < sizeof (dl_unbind_req_t)) {
655 		dl_err = DL_BADPRIM;
656 		goto failed;
657 	}
658 
659 	if (dsp->ds_dlstate != DL_IDLE) {
660 		dl_err = DL_OUTSTATE;
661 		goto failed;
662 	}
663 
664 	dsp->ds_dlstate = DL_UNBIND_PENDING;
665 
666 	mutex_enter(&dsp->ds_thr_lock);
667 	ASSERT(dsp->ds_pending_req == NULL);
668 	dsp->ds_pending_req = mp;
669 	dsp->ds_pending_op = proto_process_unbind_req;
670 	dsp->ds_pending_cnt++;
671 	mutex_exit(&dsp->ds_thr_lock);
672 	rw_exit(&dsp->ds_lock);
673 
674 	return (B_TRUE);
675 failed:
676 	rw_exit(&dsp->ds_lock);
677 	dlerrorack(q, mp, DL_UNBIND_REQ, dl_err, 0);
678 	return (B_FALSE);
679 }
680 
681 /*
682  * DL_PROMISCON_REQ
683  */
684 static boolean_t
685 proto_promiscon_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
686 {
687 	dl_promiscon_req_t *dlp = (dl_promiscon_req_t *)udlp;
688 	int		err = 0;
689 	t_uscalar_t	dl_err;
690 	uint32_t	promisc_saved;
691 	queue_t		*q = dsp->ds_wq;
692 
693 	rw_enter(&dsp->ds_lock, RW_WRITER);
694 
695 	if (MBLKL(mp) < sizeof (dl_promiscon_req_t)) {
696 		dl_err = DL_BADPRIM;
697 		goto failed;
698 	}
699 
700 	if (dsp->ds_dlstate == DL_UNATTACHED ||
701 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
702 		dl_err = DL_OUTSTATE;
703 		goto failed;
704 	}
705 
706 	promisc_saved = dsp->ds_promisc;
707 	switch (dlp->dl_level) {
708 	case DL_PROMISC_SAP:
709 		dsp->ds_promisc |= DLS_PROMISC_SAP;
710 		break;
711 
712 	case DL_PROMISC_MULTI:
713 		dsp->ds_promisc |= DLS_PROMISC_MULTI;
714 		break;
715 
716 	case DL_PROMISC_PHYS:
717 		dsp->ds_promisc |= DLS_PROMISC_PHYS;
718 		break;
719 
720 	default:
721 		dl_err = DL_NOTSUPPORTED;
722 		goto failed;
723 	}
724 
725 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
726 	    !dls_active_set(dsp->ds_dc)) {
727 		dsp->ds_promisc = promisc_saved;
728 		dl_err = DL_SYSERR;
729 		err = EBUSY;
730 		goto failed;
731 	}
732 
733 	/*
734 	 * Adjust channel promiscuity.
735 	 */
736 	err = dls_promisc(dsp->ds_dc, dsp->ds_promisc);
737 	if (err != 0) {
738 		dl_err = DL_SYSERR;
739 		dsp->ds_promisc = promisc_saved;
740 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
741 			dls_active_clear(dsp->ds_dc);
742 
743 		goto failed;
744 	}
745 
746 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
747 		dsp->ds_passivestate = DLD_ACTIVE;
748 
749 	rw_exit(&dsp->ds_lock);
750 	dlokack(q, mp, DL_PROMISCON_REQ);
751 	return (B_TRUE);
752 failed:
753 	rw_exit(&dsp->ds_lock);
754 	dlerrorack(q, mp, DL_PROMISCON_REQ, dl_err, (t_uscalar_t)err);
755 	return (B_FALSE);
756 }
757 
758 /*
759  * DL_PROMISCOFF_REQ
760  */
761 static boolean_t
762 proto_promiscoff_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
763 {
764 	dl_promiscoff_req_t *dlp = (dl_promiscoff_req_t *)udlp;
765 	int		err = 0;
766 	t_uscalar_t	dl_err;
767 	uint32_t	promisc_saved;
768 	queue_t		*q = dsp->ds_wq;
769 
770 	rw_enter(&dsp->ds_lock, RW_WRITER);
771 
772 	if (MBLKL(mp) < sizeof (dl_promiscoff_req_t)) {
773 		dl_err = DL_BADPRIM;
774 		goto failed;
775 	}
776 
777 	if (dsp->ds_dlstate == DL_UNATTACHED ||
778 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
779 		dl_err = DL_OUTSTATE;
780 		goto failed;
781 	}
782 
783 	promisc_saved = dsp->ds_promisc;
784 	switch (dlp->dl_level) {
785 	case DL_PROMISC_SAP:
786 		if (!(dsp->ds_promisc & DLS_PROMISC_SAP)) {
787 			dl_err = DL_NOTENAB;
788 			goto failed;
789 		}
790 		dsp->ds_promisc &= ~DLS_PROMISC_SAP;
791 		break;
792 
793 	case DL_PROMISC_MULTI:
794 		if (!(dsp->ds_promisc & DLS_PROMISC_MULTI)) {
795 			dl_err = DL_NOTENAB;
796 			goto failed;
797 		}
798 		dsp->ds_promisc &= ~DLS_PROMISC_MULTI;
799 		break;
800 
801 	case DL_PROMISC_PHYS:
802 		if (!(dsp->ds_promisc & DLS_PROMISC_PHYS)) {
803 			dl_err = DL_NOTENAB;
804 			goto failed;
805 		}
806 		dsp->ds_promisc &= ~DLS_PROMISC_PHYS;
807 		break;
808 
809 	default:
810 		dl_err = DL_NOTSUPPORTED;
811 		goto failed;
812 	}
813 
814 	/*
815 	 * Adjust channel promiscuity.
816 	 */
817 	err = dls_promisc(dsp->ds_dc, dsp->ds_promisc);
818 	if (err != 0) {
819 		dsp->ds_promisc = promisc_saved;
820 		dl_err = DL_SYSERR;
821 		goto failed;
822 	}
823 
824 	rw_exit(&dsp->ds_lock);
825 	dlokack(q, mp, DL_PROMISCOFF_REQ);
826 	return (B_TRUE);
827 failed:
828 	rw_exit(&dsp->ds_lock);
829 	dlerrorack(q, mp, DL_PROMISCOFF_REQ, dl_err, (t_uscalar_t)err);
830 	return (B_FALSE);
831 }
832 
833 /*
834  * DL_ENABMULTI_REQ
835  */
836 static boolean_t
837 proto_enabmulti_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
838 {
839 	dl_enabmulti_req_t *dlp = (dl_enabmulti_req_t *)udlp;
840 	int		err = 0;
841 	t_uscalar_t	dl_err;
842 	queue_t		*q = dsp->ds_wq;
843 
844 	rw_enter(&dsp->ds_lock, RW_WRITER);
845 
846 	if (dsp->ds_dlstate == DL_UNATTACHED ||
847 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
848 		dl_err = DL_OUTSTATE;
849 		goto failed;
850 	}
851 
852 	if (MBLKL(mp) < sizeof (dl_enabmulti_req_t) ||
853 	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
854 	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
855 		dl_err = DL_BADPRIM;
856 		goto failed;
857 	}
858 
859 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
860 	    !dls_active_set(dsp->ds_dc)) {
861 		dl_err = DL_SYSERR;
862 		err = EBUSY;
863 		goto failed;
864 	}
865 
866 	err = dls_multicst_add(dsp->ds_dc, mp->b_rptr + dlp->dl_addr_offset);
867 	if (err != 0) {
868 		switch (err) {
869 		case EINVAL:
870 			dl_err = DL_BADADDR;
871 			err = 0;
872 			break;
873 		case ENOSPC:
874 			dl_err = DL_TOOMANY;
875 			err = 0;
876 			break;
877 		default:
878 			dl_err = DL_SYSERR;
879 			break;
880 		}
881 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
882 			dls_active_clear(dsp->ds_dc);
883 
884 		goto failed;
885 	}
886 
887 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
888 		dsp->ds_passivestate = DLD_ACTIVE;
889 
890 	rw_exit(&dsp->ds_lock);
891 	dlokack(q, mp, DL_ENABMULTI_REQ);
892 	return (B_TRUE);
893 failed:
894 	rw_exit(&dsp->ds_lock);
895 	dlerrorack(q, mp, DL_ENABMULTI_REQ, dl_err, (t_uscalar_t)err);
896 	return (B_FALSE);
897 }
898 
899 /*
900  * DL_DISABMULTI_REQ
901  */
902 static boolean_t
903 proto_disabmulti_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
904 {
905 	dl_disabmulti_req_t *dlp = (dl_disabmulti_req_t *)udlp;
906 	int		err = 0;
907 	t_uscalar_t	dl_err;
908 	queue_t		*q = dsp->ds_wq;
909 
910 	rw_enter(&dsp->ds_lock, RW_READER);
911 
912 	if (dsp->ds_dlstate == DL_UNATTACHED ||
913 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
914 		dl_err = DL_OUTSTATE;
915 		goto failed;
916 	}
917 
918 	if (MBLKL(mp) < sizeof (dl_disabmulti_req_t) ||
919 	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
920 	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
921 		dl_err = DL_BADPRIM;
922 		goto failed;
923 	}
924 
925 	err = dls_multicst_remove(dsp->ds_dc, mp->b_rptr + dlp->dl_addr_offset);
926 	if (err != 0) {
927 	switch (err) {
928 		case EINVAL:
929 			dl_err = DL_BADADDR;
930 			err = 0;
931 			break;
932 
933 		case ENOENT:
934 			dl_err = DL_NOTENAB;
935 			err = 0;
936 			break;
937 
938 		default:
939 			dl_err = DL_SYSERR;
940 			break;
941 		}
942 		goto failed;
943 	}
944 
945 	rw_exit(&dsp->ds_lock);
946 	dlokack(q, mp, DL_DISABMULTI_REQ);
947 	return (B_TRUE);
948 failed:
949 	rw_exit(&dsp->ds_lock);
950 	dlerrorack(q, mp, DL_DISABMULTI_REQ, dl_err, (t_uscalar_t)err);
951 	return (B_FALSE);
952 }
953 
954 /*
955  * DL_PHYS_ADDR_REQ
956  */
957 static boolean_t
958 proto_physaddr_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
959 {
960 	dl_phys_addr_req_t *dlp = (dl_phys_addr_req_t *)udlp;
961 	queue_t		*q = dsp->ds_wq;
962 	t_uscalar_t	dl_err;
963 	char		*addr;
964 	uint_t		addr_length;
965 
966 	rw_enter(&dsp->ds_lock, RW_READER);
967 
968 	if (MBLKL(mp) < sizeof (dl_phys_addr_req_t)) {
969 		dl_err = DL_BADPRIM;
970 		goto failed;
971 	}
972 
973 	if (dsp->ds_dlstate == DL_UNATTACHED ||
974 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
975 		dl_err = DL_OUTSTATE;
976 		goto failed;
977 	}
978 
979 	if (dlp->dl_addr_type != DL_CURR_PHYS_ADDR &&
980 	    dlp->dl_addr_type != DL_FACT_PHYS_ADDR) {
981 		dl_err = DL_UNSUPPORTED;
982 		goto failed;
983 	}
984 
985 	addr_length = dsp->ds_mip->mi_addr_length;
986 	addr = kmem_alloc(addr_length, KM_NOSLEEP);
987 	if (addr == NULL) {
988 		rw_exit(&dsp->ds_lock);
989 		merror(q, mp, ENOSR);
990 		return (B_FALSE);
991 	}
992 
993 	/*
994 	 * Copy out the address before we drop the lock; we don't
995 	 * want to call dlphysaddrack() while holding ds_lock.
996 	 */
997 	bcopy((dlp->dl_addr_type == DL_CURR_PHYS_ADDR) ?
998 	    dsp->ds_curr_addr : dsp->ds_fact_addr, addr, addr_length);
999 
1000 	rw_exit(&dsp->ds_lock);
1001 	dlphysaddrack(q, mp, addr, (t_uscalar_t)addr_length);
1002 	kmem_free(addr, addr_length);
1003 	return (B_TRUE);
1004 failed:
1005 	rw_exit(&dsp->ds_lock);
1006 	dlerrorack(q, mp, DL_PHYS_ADDR_REQ, dl_err, 0);
1007 	return (B_FALSE);
1008 }
1009 
1010 /*
1011  * DL_SET_PHYS_ADDR_REQ
1012  */
1013 static boolean_t
1014 proto_setphysaddr_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1015 {
1016 	dl_set_phys_addr_req_t *dlp = (dl_set_phys_addr_req_t *)udlp;
1017 	int		err = 0;
1018 	t_uscalar_t	dl_err;
1019 	queue_t		*q = dsp->ds_wq;
1020 
1021 	rw_enter(&dsp->ds_lock, RW_WRITER);
1022 
1023 	if (dsp->ds_dlstate == DL_UNATTACHED ||
1024 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
1025 		dl_err = DL_OUTSTATE;
1026 		goto failed;
1027 	}
1028 
1029 	if (MBLKL(mp) < sizeof (dl_set_phys_addr_req_t) ||
1030 	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
1031 	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
1032 		dl_err = DL_BADPRIM;
1033 		goto failed;
1034 	}
1035 
1036 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
1037 	    !dls_active_set(dsp->ds_dc)) {
1038 		dl_err = DL_SYSERR;
1039 		err = EBUSY;
1040 		goto failed;
1041 	}
1042 
1043 	err = mac_unicst_set(dsp->ds_mh, mp->b_rptr + dlp->dl_addr_offset);
1044 	if (err != 0) {
1045 		switch (err) {
1046 		case EINVAL:
1047 			dl_err = DL_BADADDR;
1048 			err = 0;
1049 			break;
1050 
1051 		default:
1052 			dl_err = DL_SYSERR;
1053 			break;
1054 		}
1055 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
1056 			dls_active_clear(dsp->ds_dc);
1057 
1058 		goto failed;
1059 	}
1060 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
1061 		dsp->ds_passivestate = DLD_ACTIVE;
1062 
1063 	rw_exit(&dsp->ds_lock);
1064 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
1065 	return (B_TRUE);
1066 failed:
1067 	rw_exit(&dsp->ds_lock);
1068 	dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, dl_err, (t_uscalar_t)err);
1069 	return (B_FALSE);
1070 }
1071 
1072 /*
1073  * DL_UDQOS_REQ
1074  */
1075 static boolean_t
1076 proto_udqos_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1077 {
1078 	dl_udqos_req_t *dlp = (dl_udqos_req_t *)udlp;
1079 	dl_qos_cl_sel1_t *selp;
1080 	int		off, len;
1081 	t_uscalar_t	dl_err;
1082 	queue_t		*q = dsp->ds_wq;
1083 
1084 	off = dlp->dl_qos_offset;
1085 	len = dlp->dl_qos_length;
1086 
1087 	rw_enter(&dsp->ds_lock, RW_WRITER);
1088 
1089 	if (MBLKL(mp) < sizeof (dl_udqos_req_t) || !MBLKIN(mp, off, len)) {
1090 		dl_err = DL_BADPRIM;
1091 		goto failed;
1092 	}
1093 
1094 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
1095 	if (selp->dl_qos_type != DL_QOS_CL_SEL1) {
1096 		dl_err = DL_BADQOSTYPE;
1097 		goto failed;
1098 	}
1099 
1100 	if (dsp->ds_vid == VLAN_ID_NONE ||
1101 	    selp->dl_priority > (1 << VLAN_PRI_SIZE) - 1 ||
1102 	    selp->dl_priority < 0) {
1103 		dl_err = DL_BADQOSPARAM;
1104 		goto failed;
1105 	}
1106 
1107 	dsp->ds_pri = selp->dl_priority;
1108 
1109 	rw_exit(&dsp->ds_lock);
1110 	dlokack(q, mp, DL_UDQOS_REQ);
1111 	return (B_TRUE);
1112 failed:
1113 	rw_exit(&dsp->ds_lock);
1114 	dlerrorack(q, mp, DL_UDQOS_REQ, dl_err, 0);
1115 	return (B_FALSE);
1116 }
1117 
1118 static boolean_t
1119 check_ip_above(queue_t *q)
1120 {
1121 	queue_t		*next_q;
1122 	boolean_t	ret = B_TRUE;
1123 
1124 	claimstr(q);
1125 	next_q = q->q_next;
1126 	if (strcmp(next_q->q_qinfo->qi_minfo->mi_idname, "ip") != 0)
1127 		ret = B_FALSE;
1128 	releasestr(q);
1129 	return (ret);
1130 }
1131 
1132 /*
1133  * DL_CAPABILITY_REQ
1134  */
1135 /*ARGSUSED*/
1136 static boolean_t
1137 proto_capability_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1138 {
1139 	dl_capability_req_t *dlp = (dl_capability_req_t *)udlp;
1140 	dl_capability_sub_t *sp;
1141 	size_t		size, len;
1142 	offset_t	off, end;
1143 	t_uscalar_t	dl_err;
1144 	queue_t		*q = dsp->ds_wq;
1145 	boolean_t	upgraded;
1146 
1147 	rw_enter(&dsp->ds_lock, RW_READER);
1148 
1149 	if (MBLKL(mp) < sizeof (dl_capability_req_t)) {
1150 		dl_err = DL_BADPRIM;
1151 		goto failed;
1152 	}
1153 
1154 	if (dsp->ds_dlstate == DL_UNATTACHED ||
1155 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
1156 		dl_err = DL_OUTSTATE;
1157 		goto failed;
1158 	}
1159 
1160 	/*
1161 	 * This request is overloaded. If there are no requested capabilities
1162 	 * then we just want to acknowledge with all the capabilities we
1163 	 * support. Otherwise we enable the set of capabilities requested.
1164 	 */
1165 	if (dlp->dl_sub_length == 0) {
1166 		/* callee drops lock */
1167 		return (proto_capability_advertise(dsp, mp));
1168 	}
1169 
1170 	if (!MBLKIN(mp, dlp->dl_sub_offset, dlp->dl_sub_length)) {
1171 		dl_err = DL_BADPRIM;
1172 		goto failed;
1173 	}
1174 
1175 	dlp->dl_primitive = DL_CAPABILITY_ACK;
1176 
1177 	off = dlp->dl_sub_offset;
1178 	len = dlp->dl_sub_length;
1179 
1180 	/*
1181 	 * Walk the list of capabilities to be enabled.
1182 	 */
1183 	upgraded = B_FALSE;
1184 	for (end = off + len; off < end; ) {
1185 		sp = (dl_capability_sub_t *)(mp->b_rptr + off);
1186 		size = sizeof (dl_capability_sub_t) + sp->dl_length;
1187 
1188 		if (off + size > end ||
1189 		    !IS_P2ALIGNED(off, sizeof (uint32_t))) {
1190 			dl_err = DL_BADPRIM;
1191 			goto failed;
1192 		}
1193 
1194 		switch (sp->dl_cap) {
1195 		/*
1196 		 * TCP/IP checksum offload to hardware.
1197 		 */
1198 		case DL_CAPAB_HCKSUM: {
1199 			dl_capab_hcksum_t *hcksump;
1200 			dl_capab_hcksum_t hcksum;
1201 
1202 			ASSERT(dsp->ds_mip->mi_cksum != 0);
1203 
1204 			hcksump = (dl_capab_hcksum_t *)&sp[1];
1205 			/*
1206 			 * Copy for alignment.
1207 			 */
1208 			bcopy(hcksump, &hcksum, sizeof (dl_capab_hcksum_t));
1209 			dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
1210 			bcopy(&hcksum, hcksump, sizeof (dl_capab_hcksum_t));
1211 			break;
1212 		}
1213 
1214 		/*
1215 		 * IP polling interface.
1216 		 */
1217 		case DL_CAPAB_POLL: {
1218 			dl_capab_dls_t *pollp;
1219 			dl_capab_dls_t	poll;
1220 
1221 			pollp = (dl_capab_dls_t *)&sp[1];
1222 			/*
1223 			 * Copy for alignment.
1224 			 */
1225 			bcopy(pollp, &poll, sizeof (dl_capab_dls_t));
1226 
1227 			/*
1228 			 * We need to become writer before enabling and/or
1229 			 * disabling the polling interface.  If we couldn'
1230 			 * upgrade, check state again after re-acquiring the
1231 			 * lock to make sure we can proceed.
1232 			 */
1233 			if (!upgraded && !rw_tryupgrade(&dsp->ds_lock)) {
1234 				rw_exit(&dsp->ds_lock);
1235 				rw_enter(&dsp->ds_lock, RW_WRITER);
1236 
1237 				if (dsp->ds_dlstate == DL_UNATTACHED ||
1238 				    DL_ACK_PENDING(dsp->ds_dlstate)) {
1239 					dl_err = DL_OUTSTATE;
1240 					goto failed;
1241 				}
1242 			}
1243 			upgraded = B_TRUE;
1244 
1245 			switch (poll.dls_flags) {
1246 			default:
1247 				/*FALLTHRU*/
1248 			case POLL_DISABLE:
1249 				proto_poll_disable(dsp);
1250 				break;
1251 
1252 			case POLL_ENABLE:
1253 				ASSERT(!(dld_opt & DLD_OPT_NO_POLL));
1254 
1255 				/*
1256 				 * Make sure polling is disabled.
1257 				 */
1258 				proto_poll_disable(dsp);
1259 
1260 				/*
1261 				 * Now attempt enable it.
1262 				 */
1263 				if (check_ip_above(dsp->ds_rq) &&
1264 				    proto_poll_enable(dsp, &poll)) {
1265 					bzero(&poll, sizeof (dl_capab_dls_t));
1266 					poll.dls_flags = POLL_ENABLE;
1267 				}
1268 				break;
1269 			}
1270 
1271 			dlcapabsetqid(&(poll.dls_mid), dsp->ds_rq);
1272 			bcopy(&poll, pollp, sizeof (dl_capab_dls_t));
1273 			break;
1274 		}
1275 		case DL_CAPAB_SOFT_RING: {
1276 			dl_capab_dls_t *soft_ringp;
1277 			dl_capab_dls_t soft_ring;
1278 
1279 			soft_ringp = (dl_capab_dls_t *)&sp[1];
1280 			/*
1281 			 * Copy for alignment.
1282 			 */
1283 			bcopy(soft_ringp, &soft_ring,
1284 			    sizeof (dl_capab_dls_t));
1285 
1286 			/*
1287 			 * We need to become writer before enabling and/or
1288 			 * disabling the soft_ring interface.  If we couldn'
1289 			 * upgrade, check state again after re-acquiring the
1290 			 * lock to make sure we can proceed.
1291 			 */
1292 			if (!upgraded && !rw_tryupgrade(&dsp->ds_lock)) {
1293 				rw_exit(&dsp->ds_lock);
1294 				rw_enter(&dsp->ds_lock, RW_WRITER);
1295 
1296 				if (dsp->ds_dlstate == DL_UNATTACHED ||
1297 				    DL_ACK_PENDING(dsp->ds_dlstate)) {
1298 					dl_err = DL_OUTSTATE;
1299 					goto failed;
1300 				}
1301 			}
1302 			upgraded = B_TRUE;
1303 
1304 			switch (soft_ring.dls_flags) {
1305 			default:
1306 				/*FALLTHRU*/
1307 			case SOFT_RING_DISABLE:
1308 				proto_soft_ring_disable(dsp);
1309 				break;
1310 
1311 			case SOFT_RING_ENABLE:
1312 				/*
1313 				 * Make sure soft_ring is disabled.
1314 				 */
1315 				proto_soft_ring_disable(dsp);
1316 
1317 				/*
1318 				 * Now attempt enable it.
1319 				 */
1320 				if (check_ip_above(dsp->ds_rq) &&
1321 				    proto_soft_ring_enable(dsp, &soft_ring)) {
1322 					bzero(&soft_ring,
1323 					    sizeof (dl_capab_dls_t));
1324 					soft_ring.dls_flags =
1325 					    SOFT_RING_ENABLE;
1326 				} else {
1327 					bzero(&soft_ring,
1328 					    sizeof (dl_capab_dls_t));
1329 					soft_ring.dls_flags =
1330 					    SOFT_RING_DISABLE;
1331 				}
1332 				break;
1333 			}
1334 
1335 			dlcapabsetqid(&(soft_ring.dls_mid), dsp->ds_rq);
1336 			bcopy(&soft_ring, soft_ringp,
1337 			    sizeof (dl_capab_dls_t));
1338 			break;
1339 		}
1340 		default:
1341 			break;
1342 		}
1343 
1344 		off += size;
1345 	}
1346 	rw_exit(&dsp->ds_lock);
1347 	qreply(q, mp);
1348 	return (B_TRUE);
1349 failed:
1350 	rw_exit(&dsp->ds_lock);
1351 	dlerrorack(q, mp, DL_CAPABILITY_REQ, dl_err, 0);
1352 	return (B_FALSE);
1353 }
1354 
1355 /*
1356  * DL_NOTIFY_REQ
1357  */
1358 static boolean_t
1359 proto_notify_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1360 {
1361 	dl_notify_req_t	*dlp = (dl_notify_req_t *)udlp;
1362 	t_uscalar_t	dl_err;
1363 	queue_t		*q = dsp->ds_wq;
1364 	uint_t		note =
1365 	    DL_NOTE_PROMISC_ON_PHYS |
1366 	    DL_NOTE_PROMISC_OFF_PHYS |
1367 	    DL_NOTE_PHYS_ADDR |
1368 	    DL_NOTE_LINK_UP |
1369 	    DL_NOTE_LINK_DOWN |
1370 	    DL_NOTE_CAPAB_RENEG;
1371 
1372 	rw_enter(&dsp->ds_lock, RW_WRITER);
1373 
1374 	if (MBLKL(mp) < sizeof (dl_notify_req_t)) {
1375 		dl_err = DL_BADPRIM;
1376 		goto failed;
1377 	}
1378 
1379 	if (dsp->ds_dlstate == DL_UNATTACHED ||
1380 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
1381 		dl_err = DL_OUTSTATE;
1382 		goto failed;
1383 	}
1384 
1385 	if (dsp->ds_mip->mi_stat[MAC_STAT_IFSPEED])
1386 		note |= DL_NOTE_SPEED;
1387 
1388 	/*
1389 	 * Cache the notifications that are being enabled.
1390 	 */
1391 	dsp->ds_notifications = dlp->dl_notifications & note;
1392 	rw_exit(&dsp->ds_lock);
1393 	/*
1394 	 * The ACK carries all notifications regardless of which set is
1395 	 * being enabled.
1396 	 */
1397 	dlnotifyack(q, mp, note);
1398 
1399 	/*
1400 	 * Solicit DL_NOTIFY_IND messages for each enabled notification.
1401 	 */
1402 	rw_enter(&dsp->ds_lock, RW_READER);
1403 	if (dsp->ds_notifications != 0) {
1404 		rw_exit(&dsp->ds_lock);
1405 		dld_str_notify_ind(dsp);
1406 	} else {
1407 		rw_exit(&dsp->ds_lock);
1408 	}
1409 	return (B_TRUE);
1410 failed:
1411 	rw_exit(&dsp->ds_lock);
1412 	dlerrorack(q, mp, DL_NOTIFY_REQ, dl_err, 0);
1413 	return (B_FALSE);
1414 }
1415 
1416 /*
1417  * DL_UINTDATA_REQ
1418  */
1419 static boolean_t
1420 proto_unitdata_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1421 {
1422 	queue_t			*q = dsp->ds_wq;
1423 	dl_unitdata_req_t	*dlp = (dl_unitdata_req_t *)udlp;
1424 	off_t			off;
1425 	size_t			len, size;
1426 	const uint8_t		*addr;
1427 	uint16_t		sap;
1428 	uint_t			addr_length;
1429 	mblk_t			*bp, *cont;
1430 	uint32_t		start, stuff, end, value, flags;
1431 	t_uscalar_t		dl_err;
1432 
1433 	rw_enter(&dsp->ds_lock, RW_READER);
1434 
1435 	if (MBLKL(mp) < sizeof (dl_unitdata_req_t) || mp->b_cont == NULL) {
1436 		dl_err = DL_BADPRIM;
1437 		goto failed;
1438 	}
1439 
1440 	if (dsp->ds_dlstate != DL_IDLE) {
1441 		dl_err = DL_OUTSTATE;
1442 		goto failed;
1443 	}
1444 	addr_length = dsp->ds_mip->mi_addr_length;
1445 
1446 	off = dlp->dl_dest_addr_offset;
1447 	len = dlp->dl_dest_addr_length;
1448 
1449 	if (!MBLKIN(mp, off, len) || !IS_P2ALIGNED(off, sizeof (uint16_t))) {
1450 		dl_err = DL_BADPRIM;
1451 		goto failed;
1452 	}
1453 
1454 	if (len != addr_length + sizeof (uint16_t)) {
1455 		dl_err = DL_BADADDR;
1456 		goto failed;
1457 	}
1458 
1459 	addr = mp->b_rptr + off;
1460 	sap = *(uint16_t *)(mp->b_rptr + off + addr_length);
1461 
1462 	/*
1463 	 * Check the length of the packet and the block types.
1464 	 */
1465 	size = 0;
1466 	cont = mp->b_cont;
1467 	for (bp = cont; bp != NULL; bp = bp->b_cont) {
1468 		if (DB_TYPE(bp) != M_DATA)
1469 			goto baddata;
1470 
1471 		size += MBLKL(bp);
1472 	}
1473 
1474 	if (size > dsp->ds_mip->mi_sdu_max)
1475 		goto baddata;
1476 
1477 	/*
1478 	 * sap <= ETHERMTU indicates that LLC is being used
1479 	 * and ethertype needs to be set to the payload length.
1480 	 */
1481 	if (sap <= ETHERMTU)
1482 		sap = (uint16_t)size;
1483 
1484 	/*
1485 	 * Build a packet header.
1486 	 */
1487 	if ((bp = dls_header(dsp->ds_dc, addr, sap, dsp->ds_pri)) == NULL) {
1488 		dl_err = DL_BADADDR;
1489 		goto failed;
1490 	}
1491 
1492 	/*
1493 	 * We no longer need the M_PROTO header, so free it.
1494 	 */
1495 	freeb(mp);
1496 
1497 	/*
1498 	 * Transfer the checksum offload information if it is present.
1499 	 */
1500 	hcksum_retrieve(cont, NULL, NULL, &start, &stuff, &end, &value,
1501 	    &flags);
1502 	(void) hcksum_assoc(bp, NULL, NULL, start, stuff, end, value, flags,
1503 	    0);
1504 
1505 	/*
1506 	 * Link the payload onto the new header.
1507 	 */
1508 	ASSERT(bp->b_cont == NULL);
1509 	bp->b_cont = cont;
1510 
1511 	str_mdata_fastpath_put(dsp, bp);
1512 	rw_exit(&dsp->ds_lock);
1513 	return (B_TRUE);
1514 failed:
1515 	rw_exit(&dsp->ds_lock);
1516 	dlerrorack(q, mp, DL_UNITDATA_REQ, dl_err, 0);
1517 	return (B_FALSE);
1518 
1519 baddata:
1520 	rw_exit(&dsp->ds_lock);
1521 	dluderrorind(q, mp, (void *)addr, len, DL_BADDATA, 0);
1522 	return (B_FALSE);
1523 }
1524 
1525 /*
1526  * DL_PASSIVE_REQ
1527  */
1528 /* ARGSUSED */
1529 static boolean_t
1530 proto_passive_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1531 {
1532 	t_uscalar_t dl_err;
1533 
1534 	rw_enter(&dsp->ds_lock, RW_WRITER);
1535 	/*
1536 	 * If we've already become active by issuing an active primitive,
1537 	 * then it's too late to try to become passive.
1538 	 */
1539 	if (dsp->ds_passivestate == DLD_ACTIVE) {
1540 		dl_err = DL_OUTSTATE;
1541 		goto failed;
1542 	}
1543 
1544 	if (MBLKL(mp) < sizeof (dl_passive_req_t)) {
1545 		dl_err = DL_BADPRIM;
1546 		goto failed;
1547 	}
1548 
1549 	dsp->ds_passivestate = DLD_PASSIVE;
1550 	rw_exit(&dsp->ds_lock);
1551 	dlokack(dsp->ds_wq, mp, DL_PASSIVE_REQ);
1552 	return (B_TRUE);
1553 failed:
1554 	rw_exit(&dsp->ds_lock);
1555 	dlerrorack(dsp->ds_wq, mp, DL_PASSIVE_REQ, dl_err, 0);
1556 	return (B_FALSE);
1557 }
1558 
1559 
1560 /*
1561  * Catch-all handler.
1562  */
1563 static boolean_t
1564 proto_req(dld_str_t *dsp, union DL_primitives *dlp, mblk_t *mp)
1565 {
1566 	dlerrorack(dsp->ds_wq, mp, dlp->dl_primitive, DL_UNSUPPORTED, 0);
1567 	return (B_FALSE);
1568 }
1569 
1570 static void
1571 proto_poll_disable(dld_str_t *dsp)
1572 {
1573 	mac_handle_t	mh;
1574 
1575 	ASSERT(dsp->ds_pending_req != NULL || RW_WRITE_HELD(&dsp->ds_lock));
1576 
1577 	if (!dsp->ds_polling)
1578 		return;
1579 
1580 	/*
1581 	 * It should be impossible to enable raw mode if polling is turned on.
1582 	 */
1583 	ASSERT(dsp->ds_mode != DLD_RAW);
1584 
1585 	/*
1586 	 * Reset the resource_add callback.
1587 	 */
1588 	mh = dls_mac(dsp->ds_dc);
1589 	mac_resource_set(mh, NULL, NULL);
1590 	mac_resources(mh);
1591 
1592 	/*
1593 	 * Set receive function back to default.
1594 	 */
1595 	dls_rx_set(dsp->ds_dc, (dsp->ds_mode == DLD_FASTPATH) ?
1596 	    dld_str_rx_fastpath : dld_str_rx_unitdata, (void *)dsp);
1597 
1598 	/*
1599 	 * Note that polling is disabled.
1600 	 */
1601 	dsp->ds_polling = B_FALSE;
1602 }
1603 
1604 static boolean_t
1605 proto_poll_enable(dld_str_t *dsp, dl_capab_dls_t *pollp)
1606 {
1607 	mac_handle_t	mh;
1608 
1609 	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
1610 	ASSERT(!dsp->ds_polling);
1611 
1612 	/*
1613 	 * We cannot enable polling if raw mode
1614 	 * has been enabled.
1615 	 */
1616 	if (dsp->ds_mode == DLD_RAW)
1617 		return (B_FALSE);
1618 
1619 	mh = dls_mac(dsp->ds_dc);
1620 
1621 	/*
1622 	 * Register resources.
1623 	 */
1624 	mac_resource_set(mh, (mac_resource_add_t)pollp->dls_ring_add,
1625 	    (void *)pollp->dls_rx_handle);
1626 	mac_resources(mh);
1627 
1628 	/*
1629 	 * Set the receive function.
1630 	 */
1631 	dls_rx_set(dsp->ds_dc, (dls_rx_t)pollp->dls_rx,
1632 	    (void *)pollp->dls_rx_handle);
1633 
1634 	/*
1635 	 * Note that polling is enabled. This prevents further DLIOCHDRINFO
1636 	 * ioctls from overwriting the receive function pointer.
1637 	 */
1638 	dsp->ds_polling = B_TRUE;
1639 	return (B_TRUE);
1640 }
1641 
1642 static void
1643 proto_soft_ring_disable(dld_str_t *dsp)
1644 {
1645 	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
1646 
1647 	if (!dsp->ds_soft_ring)
1648 		return;
1649 
1650 	/*
1651 	 * It should be impossible to enable raw mode if soft_ring is turned on.
1652 	 */
1653 	ASSERT(dsp->ds_mode != DLD_RAW);
1654 	proto_change_soft_ring_fanout(dsp, SOFT_RING_NONE);
1655 	/*
1656 	 * Note that fanout is disabled.
1657 	 */
1658 	dsp->ds_soft_ring = B_FALSE;
1659 }
1660 
1661 static boolean_t
1662 proto_soft_ring_enable(dld_str_t *dsp, dl_capab_dls_t *soft_ringp)
1663 {
1664 	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
1665 	ASSERT(!dsp->ds_soft_ring);
1666 
1667 	/*
1668 	 * We cannot enable soft_ring if raw mode
1669 	 * has been enabled.
1670 	 */
1671 	if (dsp->ds_mode == DLD_RAW)
1672 		return (B_FALSE);
1673 
1674 	if (dls_soft_ring_enable(dsp->ds_dc, soft_ringp) == B_FALSE)
1675 		return (B_FALSE);
1676 
1677 	dsp->ds_soft_ring = B_TRUE;
1678 	return (B_TRUE);
1679 }
1680 
1681 static void
1682 proto_change_soft_ring_fanout(dld_str_t *dsp, int type)
1683 {
1684 	dls_rx_t	rx;
1685 
1686 	if (type == SOFT_RING_NONE) {
1687 		rx = (dsp->ds_mode == DLD_FASTPATH) ?
1688 			    dld_str_rx_fastpath : dld_str_rx_unitdata;
1689 	} else {
1690 		rx = (dls_rx_t)dls_ether_soft_ring_fanout;
1691 	}
1692 	dls_soft_ring_rx_set(dsp->ds_dc, rx, dsp, type);
1693 }
1694 
1695 /*
1696  * DL_CAPABILITY_ACK/DL_ERROR_ACK
1697  */
1698 static boolean_t
1699 proto_capability_advertise(dld_str_t *dsp, mblk_t *mp)
1700 {
1701 	dl_capability_ack_t	*dlap;
1702 	dl_capability_sub_t	*dlsp;
1703 	size_t			subsize;
1704 	dl_capab_dls_t		poll;
1705 	dl_capab_dls_t	soft_ring;
1706 	dl_capab_hcksum_t	hcksum;
1707 	dl_capab_zerocopy_t	zcopy;
1708 	uint8_t			*ptr;
1709 	uint32_t		cksum;
1710 	boolean_t		poll_cap;
1711 	queue_t			*q = dsp->ds_wq;
1712 	mblk_t			*mp1;
1713 
1714 	ASSERT(RW_READ_HELD(&dsp->ds_lock));
1715 
1716 	/*
1717 	 * Initially assume no capabilities.
1718 	 */
1719 	subsize = 0;
1720 
1721 	/*
1722 	 * Advertize soft ring capability if
1723 	 * VLAN_ID_NONE for GLDv3 drivers
1724 	 */
1725 	if (dsp->ds_vid == VLAN_ID_NONE)
1726 		subsize += sizeof (dl_capability_sub_t) +
1727 				    sizeof (dl_capab_dls_t);
1728 
1729 	/*
1730 	 * Check if polling can be enabled on this interface.
1731 	 * If advertising DL_CAPAB_POLL has not been explicitly disabled
1732 	 * then reserve space for that capability.
1733 	 */
1734 	poll_cap = ((dsp->ds_mip->mi_poll & DL_CAPAB_POLL) &&
1735 	    !(dld_opt & DLD_OPT_NO_POLL) && (dsp->ds_vid == VLAN_ID_NONE));
1736 	if (poll_cap) {
1737 		subsize += sizeof (dl_capability_sub_t) +
1738 		    sizeof (dl_capab_dls_t);
1739 	}
1740 
1741 	/*
1742 	 * If the MAC interface supports checksum offload then reserve
1743 	 * space for the DL_CAPAB_HCKSUM capability.
1744 	 */
1745 	if ((cksum = dsp->ds_mip->mi_cksum) != 0) {
1746 		subsize += sizeof (dl_capability_sub_t) +
1747 		    sizeof (dl_capab_hcksum_t);
1748 	}
1749 
1750 	/*
1751 	 * If DL_CAPAB_ZEROCOPY has not be explicitly disabled then
1752 	 * reserve space for it.
1753 	 */
1754 	if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
1755 		subsize += sizeof (dl_capability_sub_t) +
1756 		    sizeof (dl_capab_zerocopy_t);
1757 	}
1758 
1759 	/*
1760 	 * If there are no capabilities to advertise or if we
1761 	 * can't allocate a response, send a DL_ERROR_ACK.
1762 	 */
1763 	if ((mp1 = reallocb(mp,
1764 	    sizeof (dl_capability_ack_t) + subsize, 0)) == NULL) {
1765 		rw_exit(&dsp->ds_lock);
1766 		dlerrorack(q, mp, DL_CAPABILITY_REQ, DL_NOTSUPPORTED, 0);
1767 		return (B_FALSE);
1768 	}
1769 
1770 	mp = mp1;
1771 	DB_TYPE(mp) = M_PROTO;
1772 	mp->b_wptr = mp->b_rptr + sizeof (dl_capability_ack_t) + subsize;
1773 	bzero(mp->b_rptr, MBLKL(mp));
1774 	dlap = (dl_capability_ack_t *)mp->b_rptr;
1775 	dlap->dl_primitive = DL_CAPABILITY_ACK;
1776 	dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
1777 	dlap->dl_sub_length = subsize;
1778 	ptr = (uint8_t *)&dlap[1];
1779 
1780 	/*
1781 	 * IP polling interface.
1782 	 */
1783 	if (poll_cap) {
1784 		/*
1785 		 * Attempt to disable just in case this is a re-negotiation;
1786 		 * we need to become writer before doing so.
1787 		 */
1788 		if (!rw_tryupgrade(&dsp->ds_lock)) {
1789 			rw_exit(&dsp->ds_lock);
1790 			rw_enter(&dsp->ds_lock, RW_WRITER);
1791 		}
1792 
1793 		/*
1794 		 * Check if polling state has changed after we re-acquired
1795 		 * the lock above, so that we don't mis-advertise it.
1796 		 */
1797 		poll_cap = ((dsp->ds_mip->mi_poll & DL_CAPAB_POLL) &&
1798 		    !(dld_opt & DLD_OPT_NO_POLL) &&
1799 		    (dsp->ds_vid == VLAN_ID_NONE));
1800 
1801 		if (!poll_cap) {
1802 			int poll_capab_size;
1803 
1804 			rw_downgrade(&dsp->ds_lock);
1805 
1806 			poll_capab_size = sizeof (dl_capability_sub_t) +
1807 			    sizeof (dl_capab_dls_t);
1808 
1809 			mp->b_wptr -= poll_capab_size;
1810 			subsize -= poll_capab_size;
1811 			dlap->dl_sub_length = subsize;
1812 		} else {
1813 			proto_poll_disable(dsp);
1814 
1815 			rw_downgrade(&dsp->ds_lock);
1816 
1817 			dlsp = (dl_capability_sub_t *)ptr;
1818 
1819 			dlsp->dl_cap = DL_CAPAB_POLL;
1820 			dlsp->dl_length = sizeof (dl_capab_dls_t);
1821 			ptr += sizeof (dl_capability_sub_t);
1822 
1823 			bzero(&poll, sizeof (dl_capab_dls_t));
1824 			poll.dls_version = POLL_VERSION_1;
1825 			poll.dls_flags = POLL_CAPABLE;
1826 			poll.dls_tx_handle = (uintptr_t)dsp;
1827 			poll.dls_tx = (uintptr_t)str_mdata_fastpath_put;
1828 
1829 			dlcapabsetqid(&(poll.dls_mid), dsp->ds_rq);
1830 			bcopy(&poll, ptr, sizeof (dl_capab_dls_t));
1831 			ptr += sizeof (dl_capab_dls_t);
1832 		}
1833 	}
1834 
1835 	ASSERT(RW_READ_HELD(&dsp->ds_lock));
1836 
1837 	if (dsp->ds_vid == VLAN_ID_NONE) {
1838 		dlsp = (dl_capability_sub_t *)ptr;
1839 
1840 		dlsp->dl_cap = DL_CAPAB_SOFT_RING;
1841 		dlsp->dl_length = sizeof (dl_capab_dls_t);
1842 		ptr += sizeof (dl_capability_sub_t);
1843 
1844 		bzero(&soft_ring, sizeof (dl_capab_dls_t));
1845 		soft_ring.dls_version = SOFT_RING_VERSION_1;
1846 		soft_ring.dls_flags = SOFT_RING_CAPABLE;
1847 		soft_ring.dls_tx_handle = (uintptr_t)dsp;
1848 		soft_ring.dls_tx = (uintptr_t)str_mdata_fastpath_put;
1849 		soft_ring.dls_ring_change_status =
1850 		    (uintptr_t)proto_change_soft_ring_fanout;
1851 		soft_ring.dls_ring_bind = (uintptr_t)soft_ring_bind;
1852 		soft_ring.dls_ring_unbind = (uintptr_t)soft_ring_unbind;
1853 
1854 		dlcapabsetqid(&(soft_ring.dls_mid), dsp->ds_rq);
1855 		bcopy(&soft_ring, ptr, sizeof (dl_capab_dls_t));
1856 		ptr += sizeof (dl_capab_dls_t);
1857 	}
1858 
1859 	/*
1860 	 * TCP/IP checksum offload.
1861 	 */
1862 	if (cksum != 0) {
1863 		dlsp = (dl_capability_sub_t *)ptr;
1864 
1865 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
1866 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
1867 		ptr += sizeof (dl_capability_sub_t);
1868 
1869 		bzero(&hcksum, sizeof (dl_capab_hcksum_t));
1870 		hcksum.hcksum_version = HCKSUM_VERSION_1;
1871 		hcksum.hcksum_txflags = cksum;
1872 
1873 		dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
1874 		bcopy(&hcksum, ptr, sizeof (dl_capab_hcksum_t));
1875 		ptr += sizeof (dl_capab_hcksum_t);
1876 	}
1877 
1878 	/*
1879 	 * Zero copy
1880 	 */
1881 	if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
1882 		dlsp = (dl_capability_sub_t *)ptr;
1883 
1884 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
1885 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
1886 		ptr += sizeof (dl_capability_sub_t);
1887 
1888 		bzero(&zcopy, sizeof (dl_capab_zerocopy_t));
1889 		zcopy.zerocopy_version = ZEROCOPY_VERSION_1;
1890 		zcopy.zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
1891 
1892 		dlcapabsetqid(&(zcopy.zerocopy_mid), dsp->ds_rq);
1893 		bcopy(&zcopy, ptr, sizeof (dl_capab_zerocopy_t));
1894 		ptr += sizeof (dl_capab_zerocopy_t);
1895 	}
1896 
1897 	ASSERT(ptr == mp->b_rptr + sizeof (dl_capability_ack_t) + subsize);
1898 
1899 	rw_exit(&dsp->ds_lock);
1900 	qreply(q, mp);
1901 	return (B_TRUE);
1902 }
1903