xref: /titanic_50/usr/src/uts/common/io/dld/dld_proto.c (revision e6e675992eea58770f8f0c1e7f2334b34258e26b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Data-Link Driver
30  */
31 
32 #include <sys/types.h>
33 #include <sys/debug.h>
34 #include <sys/sysmacros.h>
35 #include <sys/stream.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/strsun.h>
39 #include <sys/cpuvar.h>
40 #include <sys/dlpi.h>
41 #include <netinet/in.h>
42 #include <sys/sdt.h>
43 #include <sys/strsubr.h>
44 #include <sys/vlan.h>
45 #include <sys/mac.h>
46 #include <sys/dls.h>
47 #include <sys/dld.h>
48 #include <sys/dld_impl.h>
49 #include <sys/dls_soft_ring.h>
50 
51 typedef boolean_t proto_reqfunc_t(dld_str_t *, union DL_primitives *, mblk_t *);
52 
53 static proto_reqfunc_t proto_info_req, proto_attach_req, proto_detach_req,
54     proto_bind_req, proto_unbind_req, proto_promiscon_req, proto_promiscoff_req,
55     proto_enabmulti_req, proto_disabmulti_req, proto_physaddr_req,
56     proto_setphysaddr_req, proto_udqos_req, proto_req, proto_capability_req,
57     proto_notify_req, proto_unitdata_req, proto_passive_req;
58 
59 static void proto_poll_disable(dld_str_t *);
60 static boolean_t proto_poll_enable(dld_str_t *, dl_capab_dls_t *);
61 static boolean_t proto_capability_advertise(dld_str_t *, mblk_t *);
62 
63 static task_func_t proto_process_unbind_req, proto_process_detach_req;
64 
65 static void proto_soft_ring_disable(dld_str_t *);
66 static boolean_t proto_soft_ring_enable(dld_str_t *, dl_capab_dls_t *);
68 static void proto_change_soft_ring_fanout(dld_str_t *, int);
69 
70 #define	DL_ACK_PENDING(state) \
71 	((state) == DL_ATTACH_PENDING || \
72 	(state) == DL_DETACH_PENDING || \
73 	(state) == DL_BIND_PENDING || \
74 	(state) == DL_UNBIND_PENDING)
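
/*
 * A stream whose DLPI state is one of these has a previous request that
 * has not yet been acknowledged; requests arriving in such a state are
 * rejected with DL_OUTSTATE.
 */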
75 
76 /*
77  * Process a DLPI protocol message.
78  * The primitives DL_BIND_REQ, DL_ENABMULTI_REQ, DL_PROMISCON_REQ and
79  * DL_SET_PHYS_ADDR_REQ put the data link below our dld_str_t into an
80  * 'active' state. The primitive DL_PASSIVE_REQ marks our dld_str_t
81  * as 'passive' and forbids it from being subsequently made 'active'
82  * by the above primitives.
83  */
84 void
85 dld_proto(dld_str_t *dsp, mblk_t *mp)
86 {
87 	union DL_primitives	*udlp;
88 	t_uscalar_t		prim;
89 
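	/* Discard messages too short to carry a DLPI primitive. */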
90 	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
91 		freemsg(mp);
92 		return;
93 	}
94 
95 	udlp = (union DL_primitives *)mp->b_rptr;
96 	prim = udlp->dl_primitive;
97 
98 	switch (prim) {
99 	case DL_INFO_REQ:
100 		(void) proto_info_req(dsp, udlp, mp);
101 		break;
102 	case DL_BIND_REQ:
103 		(void) proto_bind_req(dsp, udlp, mp);
104 		break;
105 	case DL_UNBIND_REQ:
106 		(void) proto_unbind_req(dsp, udlp, mp);
107 		break;
108 	case DL_UNITDATA_REQ:
109 		(void) proto_unitdata_req(dsp, udlp, mp);
110 		break;
111 	case DL_UDQOS_REQ:
112 		(void) proto_udqos_req(dsp, udlp, mp);
113 		break;
114 	case DL_ATTACH_REQ:
115 		(void) proto_attach_req(dsp, udlp, mp);
116 		break;
117 	case DL_DETACH_REQ:
118 		(void) proto_detach_req(dsp, udlp, mp);
119 		break;
120 	case DL_ENABMULTI_REQ:
121 		(void) proto_enabmulti_req(dsp, udlp, mp);
122 		break;
123 	case DL_DISABMULTI_REQ:
124 		(void) proto_disabmulti_req(dsp, udlp, mp);
125 		break;
126 	case DL_PROMISCON_REQ:
127 		(void) proto_promiscon_req(dsp, udlp, mp);
128 		break;
129 	case DL_PROMISCOFF_REQ:
130 		(void) proto_promiscoff_req(dsp, udlp, mp);
131 		break;
132 	case DL_PHYS_ADDR_REQ:
133 		(void) proto_physaddr_req(dsp, udlp, mp);
134 		break;
135 	case DL_SET_PHYS_ADDR_REQ:
136 		(void) proto_setphysaddr_req(dsp, udlp, mp);
137 		break;
138 	case DL_NOTIFY_REQ:
139 		(void) proto_notify_req(dsp, udlp, mp);
140 		break;
141 	case DL_CAPABILITY_REQ:
142 		(void) proto_capability_req(dsp, udlp, mp);
143 		break;
144 	case DL_PASSIVE_REQ:
145 		(void) proto_passive_req(dsp, udlp, mp);
146 		break;
147 	default:
148 		(void) proto_req(dsp, udlp, mp);
149 		break;
150 	}
151 }
152 
153 /*
154  * Finish any pending operations.
155  * Requests that need to be processed asynchronously will be handled
156  * by a separate thread. After this function returns, other threads
157  * will be allowed to enter dld; they will not be able to do anything
158  * until ds_dlstate transitions to a non-pending state.
159  */
160 void
161 dld_finish_pending_ops(dld_str_t *dsp)
162 {
163 	task_func_t *op = NULL;
164 
165 	ASSERT(MUTEX_HELD(&dsp->ds_thr_lock));
166 	ASSERT(dsp->ds_thr == 0);
167 
168 	op = dsp->ds_pending_op;
169 	dsp->ds_pending_op = NULL;
170 	mutex_exit(&dsp->ds_thr_lock);
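	/* Run any deferred unbind/detach processing on the system taskq. */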
171 	if (op != NULL)
172 		(void) taskq_dispatch(system_taskq, op, dsp, TQ_SLEEP);
173 }
174 
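/*
 * DLPI expresses "the SAP follows the physical address within a DLSAP
 * address" as a negative dl_sap_length; NEG() is used to build that value.
 */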
175 #define	NEG(x)	-(x)
176 
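/*
 * Template used to size and lay out a DL_INFO_ACK: the fixed portion is
 * followed by the DLSAP address, the broadcast address and the QOS
 * parameter blocks.
 */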
177 typedef struct dl_info_ack_wrapper {
178 	dl_info_ack_t		dl_info;
179 	uint8_t			dl_addr[MAXMACADDRLEN + sizeof (uint16_t)];
180 	uint8_t			dl_brdcst_addr[MAXMACADDRLEN];
181 	dl_qos_cl_range1_t	dl_qos_range1;
182 	dl_qos_cl_sel1_t	dl_qos_sel1;
183 } dl_info_ack_wrapper_t;
184 
185 /*
186  * DL_INFO_REQ
187  */
188 /*ARGSUSED*/
189 static boolean_t
190 proto_info_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
191 {
192 	dl_info_ack_wrapper_t	*dlwp;
193 	dl_info_ack_t		*dlp;
194 	dl_qos_cl_sel1_t	*selp;
195 	dl_qos_cl_range1_t	*rangep;
196 	uint8_t			*addr;
197 	uint8_t			*brdcst_addr;
198 	uint_t			addr_length;
199 	uint_t			sap_length;
200 	mac_info_t		minfo;
201 	mac_info_t		*minfop;
202 	queue_t			*q = dsp->ds_wq;
203 
204 	/*
205 	 * Swap the request message for one large enough to contain the
206 	 * wrapper structure defined above.
207 	 */
208 	if ((mp = mexchange(q, mp, sizeof (dl_info_ack_wrapper_t),
209 	    M_PCPROTO, 0)) == NULL)
210 		return (B_FALSE);
211 
212 	rw_enter(&dsp->ds_lock, RW_READER);
213 
214 	bzero(mp->b_rptr, sizeof (dl_info_ack_wrapper_t));
215 	dlwp = (dl_info_ack_wrapper_t *)mp->b_rptr;
216 
217 	dlp = &(dlwp->dl_info);
218 	ASSERT(dlp == (dl_info_ack_t *)mp->b_rptr);
219 
220 	dlp->dl_primitive = DL_INFO_ACK;
221 
222 	/*
223 	 * Set up the sub-structure pointers.
224 	 */
225 	addr = dlwp->dl_addr;
226 	brdcst_addr = dlwp->dl_brdcst_addr;
227 	rangep = &(dlwp->dl_qos_range1);
228 	selp = &(dlwp->dl_qos_sel1);
229 
230 	/*
231 	 * This driver supports only version 2 connectionless DLPI provider
232 	 * nodes.
233 	 */
234 	dlp->dl_service_mode = DL_CLDLS;
235 	dlp->dl_version = DL_VERSION_2;
236 
237 	/*
238 	 * Set the style of the provider
239 	 */
240 	dlp->dl_provider_style = dsp->ds_style;
241 	ASSERT(dlp->dl_provider_style == DL_STYLE1 ||
242 	    dlp->dl_provider_style == DL_STYLE2);
243 
244 	/*
245 	 * Set the current DLPI state.
246 	 */
247 	dlp->dl_current_state = dsp->ds_dlstate;
248 
249 	/*
250 	 * Gratuitously set the media type. This is to deal with modules
251 	 * that assume the media type is known prior to DL_ATTACH_REQ
252 	 * being completed.
253 	 */
254 	dlp->dl_mac_type = DL_ETHER;
255 
256 	/*
257 	 * If the stream is not at least attached, we try to retrieve the
258 	 * mac_info using mac_info_get().
259 	 */
260 	if (dsp->ds_dlstate == DL_UNATTACHED ||
261 	    dsp->ds_dlstate == DL_ATTACH_PENDING ||
262 	    dsp->ds_dlstate == DL_DETACH_PENDING) {
263 		if (!mac_info_get(ddi_major_to_name(dsp->ds_major), &minfo)) {
264 			/*
265 			 * Cannot find the mac_info; give up.
266 			 */
267 			goto done;
268 		}
269 		minfop = &minfo;
270 	} else {
271 		minfop = (mac_info_t *)dsp->ds_mip;
272 	}
273 
274 	/*
275 	 * Set the media type (properly this time).
276 	 */
277 	dlp->dl_mac_type = minfop->mi_media;
278 
279 	/*
280 	 * Set the DLSAP length. We only support 16-bit values and they
281 	 * appear after the MAC address portion of DLSAP addresses.
282 	 */
283 	sap_length = sizeof (uint16_t);
284 	dlp->dl_sap_length = NEG(sap_length);
285 
286 	/*
287 	 * Set the minimum and maximum payload sizes.
288 	 */
289 	dlp->dl_min_sdu = minfop->mi_sdu_min;
290 	dlp->dl_max_sdu = minfop->mi_sdu_max;
291 
292 	addr_length = minfop->mi_addr_length;
293 
294 	/*
295 	 * Copy in the media broadcast address.
296 	 */
297 	if (minfop->mi_brdcst_addr != NULL) {
298 		dlp->dl_brdcst_addr_offset =
299 		    (uintptr_t)brdcst_addr - (uintptr_t)dlp;
300 		bcopy(minfop->mi_brdcst_addr, brdcst_addr, addr_length);
301 		dlp->dl_brdcst_addr_length = addr_length;
302 	}
303 
304 	dlp->dl_qos_range_offset = (uintptr_t)rangep - (uintptr_t)dlp;
305 	dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
306 
307 	rangep->dl_qos_type = DL_QOS_CL_RANGE1;
308 	rangep->dl_trans_delay.dl_target_value = DL_UNKNOWN;
309 	rangep->dl_trans_delay.dl_accept_value = DL_UNKNOWN;
310 	rangep->dl_protection.dl_min = DL_UNKNOWN;
311 	rangep->dl_protection.dl_max = DL_UNKNOWN;
312 	rangep->dl_residual_error = DL_UNKNOWN;
313 
314 	/*
315 	 * Specify the supported range of priorities.
316 	 */
317 	rangep->dl_priority.dl_min = 0;
318 	rangep->dl_priority.dl_max = (1 << VLAN_PRI_SIZE) - 1;
319 
320 	dlp->dl_qos_offset = (uintptr_t)selp - (uintptr_t)dlp;
321 	dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
322 
323 	selp->dl_qos_type = DL_QOS_CL_SEL1;
324 	selp->dl_trans_delay = DL_UNKNOWN;
325 	selp->dl_protection = DL_UNKNOWN;
326 	selp->dl_residual_error = DL_UNKNOWN;
327 
328 	/*
329 	 * Specify the current priority (which can be changed by
330 	 * the DL_UDQOS_REQ primitive).
331 	 */
332 	selp->dl_priority = dsp->ds_pri;
333 
334 	dlp->dl_addr_length = addr_length + sizeof (uint16_t);
335 	if (dsp->ds_dlstate == DL_IDLE) {
336 		/*
337 		 * The stream is bound. Therefore we can formulate a valid
338 		 * DLSAP address.
339 		 */
340 		dlp->dl_addr_offset = (uintptr_t)addr - (uintptr_t)dlp;
341 		if (addr_length > 0)
342 			bcopy(dsp->ds_curr_addr, addr, addr_length);
343 		*(uint16_t *)(addr + addr_length) = dsp->ds_sap;
344 	}
345 
346 done:
347 	ASSERT(IMPLY(dlp->dl_qos_offset != 0, dlp->dl_qos_length != 0));
348 	ASSERT(IMPLY(dlp->dl_qos_range_offset != 0,
349 	    dlp->dl_qos_range_length != 0));
350 	ASSERT(IMPLY(dlp->dl_addr_offset != 0, dlp->dl_addr_length != 0));
351 	ASSERT(IMPLY(dlp->dl_brdcst_addr_offset != 0,
352 	    dlp->dl_brdcst_addr_length != 0));
353 
354 	rw_exit(&dsp->ds_lock);
355 
356 	qreply(q, mp);
357 	return (B_TRUE);
358 }
359 
360 /*
361  * DL_ATTACH_REQ
362  */
363 static boolean_t
364 proto_attach_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
365 {
366 	dl_attach_req_t	*dlp = (dl_attach_req_t *)udlp;
367 	int		err = 0;
368 	t_uscalar_t	dl_err;
369 	queue_t		*q = dsp->ds_wq;
370 
371 	rw_enter(&dsp->ds_lock, RW_WRITER);
372 
373 	if (MBLKL(mp) < sizeof (dl_attach_req_t) ||
374 	    dlp->dl_ppa < 0 || dsp->ds_style == DL_STYLE1) {
375 		dl_err = DL_BADPRIM;
376 		goto failed;
377 	}
378 
379 	if (dsp->ds_dlstate != DL_UNATTACHED) {
380 		dl_err = DL_OUTSTATE;
381 		goto failed;
382 	}
383 
384 	dsp->ds_dlstate = DL_ATTACH_PENDING;
385 
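	/* Associate the stream with the data link named by the PPA. */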
386 	err = dld_str_attach(dsp, dlp->dl_ppa);
387 	if (err != 0) {
388 		switch (err) {
389 		case ENOENT:
390 			dl_err = DL_BADPPA;
391 			err = 0;
392 			break;
393 		default:
394 			dl_err = DL_SYSERR;
395 			break;
396 		}
397 		dsp->ds_dlstate = DL_UNATTACHED;
398 		goto failed;
399 	}
400 	ASSERT(dsp->ds_dlstate == DL_UNBOUND);
401 	rw_exit(&dsp->ds_lock);
402 
403 	dlokack(q, mp, DL_ATTACH_REQ);
404 	return (B_TRUE);
405 failed:
406 	rw_exit(&dsp->ds_lock);
407 	dlerrorack(q, mp, DL_ATTACH_REQ, dl_err, (t_uscalar_t)err);
408 	return (B_FALSE);
409 }
410 
411 /*
412  * DL_DETACH_REQ
413  */
414 static void
415 proto_process_detach_req(void *arg)
416 {
417 	dld_str_t	*dsp = arg;
418 	mblk_t		*mp;
419 
420 	/*
421 	 * We don't need to hold locks because no other thread
422 	 * would manipulate dsp while it is in a PENDING state.
423 	 */
424 	ASSERT(dsp->ds_pending_req != NULL);
425 	ASSERT(dsp->ds_dlstate == DL_DETACH_PENDING);
426 
427 	mp = dsp->ds_pending_req;
428 	dsp->ds_pending_req = NULL;
429 	dld_str_detach(dsp);
430 	dlokack(dsp->ds_wq, mp, DL_DETACH_REQ);
431 
432 	DLD_WAKEUP(dsp);
433 }
434 
435 /*ARGSUSED*/
436 static boolean_t
437 proto_detach_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
438 {
439 	queue_t		*q = dsp->ds_wq;
440 	t_uscalar_t	dl_err;
441 
442 	rw_enter(&dsp->ds_lock, RW_WRITER);
443 
444 	if (MBLKL(mp) < sizeof (dl_detach_req_t)) {
445 		dl_err = DL_BADPRIM;
446 		goto failed;
447 	}
448 
449 	if (dsp->ds_dlstate != DL_UNBOUND) {
450 		dl_err = DL_OUTSTATE;
451 		goto failed;
452 	}
453 
454 	if (dsp->ds_style == DL_STYLE1) {
455 		dl_err = DL_BADPRIM;
456 		goto failed;
457 	}
458 
459 	dsp->ds_dlstate = DL_DETACH_PENDING;
460 
461 	/*
462 	 * Complete the detach when the driver is single-threaded.
463 	 */
464 	mutex_enter(&dsp->ds_thr_lock);
465 	ASSERT(dsp->ds_pending_req == NULL);
466 	dsp->ds_pending_req = mp;
467 	dsp->ds_pending_op = proto_process_detach_req;
468 	dsp->ds_pending_cnt++;
469 	mutex_exit(&dsp->ds_thr_lock);
470 	rw_exit(&dsp->ds_lock);
471 
472 	return (B_TRUE);
473 failed:
474 	rw_exit(&dsp->ds_lock);
475 	dlerrorack(q, mp, DL_DETACH_REQ, dl_err, 0);
476 	return (B_FALSE);
477 }
478 
479 /*
480  * DL_BIND_REQ
481  */
482 static boolean_t
483 proto_bind_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
484 {
485 	dl_bind_req_t	*dlp = (dl_bind_req_t *)udlp;
486 	int		err = 0;
487 	uint8_t		addr[MAXMACADDRLEN];
488 	uint_t		addr_length;
489 	t_uscalar_t	dl_err;
490 	t_scalar_t	sap;
491 	queue_t		*q = dsp->ds_wq;
492 
493 	rw_enter(&dsp->ds_lock, RW_WRITER);
494 
495 	if (MBLKL(mp) < sizeof (dl_bind_req_t)) {
496 		dl_err = DL_BADPRIM;
497 		goto failed;
498 	}
499 
500 	if (dlp->dl_xidtest_flg != 0) {
501 		dl_err = DL_NOAUTO;
502 		goto failed;
503 	}
504 
505 	if (dlp->dl_service_mode != DL_CLDLS) {
506 		dl_err = DL_UNSUPPORTED;
507 		goto failed;
508 	}
509 
510 	if (dsp->ds_dlstate != DL_UNBOUND) {
511 		dl_err = DL_OUTSTATE;
512 		goto failed;
513 	}
514 
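	/*
	 * DL_BIND_REQ is an 'active' operation.  If the stream has not
	 * gone passive via DL_PASSIVE_REQ, mark the link active now;
	 * this can fail (EBUSY) if the link cannot be made active.
	 */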
515 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
516 	    !dls_active_set(dsp->ds_dc)) {
517 		dl_err = DL_SYSERR;
518 		err = EBUSY;
519 		goto failed;
520 	}
521 
522 	dsp->ds_dlstate = DL_BIND_PENDING;
523 	/*
524 	 * Set the receive callback.
525 	 */
526 	dls_rx_set(dsp->ds_dc, (dsp->ds_mode == DLD_RAW) ?
527 	    dld_str_rx_raw : dld_str_rx_unitdata, dsp);
528 
529 	/*
530 	 * Bind the channel such that it can receive packets.
531 	 */
532 	sap = dsp->ds_sap = dlp->dl_sap;
533 	err = dls_bind(dsp->ds_dc, dlp->dl_sap);
534 	if (err != 0) {
535 		switch (err) {
536 		case EINVAL:
537 			dl_err = DL_BADADDR;
538 			err = 0;
539 			break;
540 		default:
541 			dl_err = DL_SYSERR;
542 			break;
543 		}
544 		dsp->ds_dlstate = DL_UNBOUND;
545 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
546 			dls_active_clear(dsp->ds_dc);
547 
548 		goto failed;
549 	}
550 
551 	/*
552 	 * Copy in MAC address.
553 	 */
554 	addr_length = dsp->ds_mip->mi_addr_length;
555 	bcopy(dsp->ds_curr_addr, addr, addr_length);
556 
557 	/*
558 	 * Copy in the DLSAP.
559 	 */
560 	*(uint16_t *)(addr + addr_length) = dsp->ds_sap;
561 	addr_length += sizeof (uint16_t);
562 
563 	dsp->ds_dlstate = DL_IDLE;
564 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
565 		dsp->ds_passivestate = DLD_ACTIVE;
566 
567 	rw_exit(&dsp->ds_lock);
568 
569 	dlbindack(q, mp, sap, (void *)addr, addr_length, 0, 0);
570 	return (B_TRUE);
571 failed:
572 	rw_exit(&dsp->ds_lock);
573 	dlerrorack(q, mp, DL_BIND_REQ, dl_err, (t_uscalar_t)err);
574 	return (B_FALSE);
575 }
576 
577 /*
578  * DL_UNBIND_REQ
579  */
580 /*ARGSUSED*/
581 static void
582 proto_process_unbind_req(void *arg)
583 {
584 	dld_str_t	*dsp = arg;
585 	mblk_t		*mp;
586 
587 	/*
588 	 * We don't need to hold locks because no other thread
589 	 * would manipulate dsp while it is in a PENDING state.
590 	 */
591 	ASSERT(dsp->ds_pending_req != NULL);
592 	ASSERT(dsp->ds_dlstate == DL_UNBIND_PENDING);
593 
594 	/*
595 	 * Flush any remaining packets scheduled for transmission.
596 	 */
597 	dld_tx_flush(dsp);
598 
599 	/*
600 	 * Unbind the channel to stop packets being received.
601 	 */
602 	dls_unbind(dsp->ds_dc);
603 
604 	/*
605 	 * Disable polling mode, if it is enabled.
606 	 */
607 	proto_poll_disable(dsp);
608 
609 	/*
610 	 * Clear the receive callback.
611 	 */
612 	dls_rx_set(dsp->ds_dc, NULL, NULL);
613 
614 	/*
615 	 * Set the mode back to the default (unitdata).
616 	 */
617 	dsp->ds_mode = DLD_UNITDATA;
618 
619 	/*
620 	 * If soft rings were enabled, the workers
621 	 * should be quiesced.  We cannot check the
622 	 * ds_soft_ring flag because
623 	 * proto_soft_ring_disable(), called from
624 	 * proto_capability_req(), would have reset it.
625 	 */
626 	if (dls_soft_ring_workers(dsp->ds_dc))
627 		dls_soft_ring_disable(dsp->ds_dc);
628 
629 	mp = dsp->ds_pending_req;
630 	dsp->ds_pending_req = NULL;
631 	dsp->ds_dlstate = DL_UNBOUND;
632 	dlokack(dsp->ds_wq, mp, DL_UNBIND_REQ);
633 
634 	DLD_WAKEUP(dsp);
635 }
636 
637 /*ARGSUSED*/
638 static boolean_t
639 proto_unbind_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
640 {
641 	queue_t		*q = dsp->ds_wq;
642 	t_uscalar_t	dl_err;
643 
644 	rw_enter(&dsp->ds_lock, RW_WRITER);
645 
646 	if (MBLKL(mp) < sizeof (dl_unbind_req_t)) {
647 		dl_err = DL_BADPRIM;
648 		goto failed;
649 	}
650 
651 	if (dsp->ds_dlstate != DL_IDLE) {
652 		dl_err = DL_OUTSTATE;
653 		goto failed;
654 	}
655 
656 	dsp->ds_dlstate = DL_UNBIND_PENDING;
657 
658 	mutex_enter(&dsp->ds_thr_lock);
659 	ASSERT(dsp->ds_pending_req == NULL);
660 	dsp->ds_pending_req = mp;
661 	dsp->ds_pending_op = proto_process_unbind_req;
662 	dsp->ds_pending_cnt++;
663 	mutex_exit(&dsp->ds_thr_lock);
664 	rw_exit(&dsp->ds_lock);
665 
666 	return (B_TRUE);
667 failed:
668 	rw_exit(&dsp->ds_lock);
669 	dlerrorack(q, mp, DL_UNBIND_REQ, dl_err, 0);
670 	return (B_FALSE);
671 }
672 
673 /*
674  * DL_PROMISCON_REQ
675  */
676 static boolean_t
677 proto_promiscon_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
678 {
679 	dl_promiscon_req_t *dlp = (dl_promiscon_req_t *)udlp;
680 	int		err = 0;
681 	t_uscalar_t	dl_err;
682 	uint32_t	promisc_saved;
683 	queue_t		*q = dsp->ds_wq;
684 
685 	rw_enter(&dsp->ds_lock, RW_WRITER);
686 
687 	if (MBLKL(mp) < sizeof (dl_promiscon_req_t)) {
688 		dl_err = DL_BADPRIM;
689 		goto failed;
690 	}
691 
692 	if (dsp->ds_dlstate == DL_UNATTACHED ||
693 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
694 		dl_err = DL_OUTSTATE;
695 		goto failed;
696 	}
697 
698 	promisc_saved = dsp->ds_promisc;
699 	switch (dlp->dl_level) {
700 	case DL_PROMISC_SAP:
701 		dsp->ds_promisc |= DLS_PROMISC_SAP;
702 		break;
703 
704 	case DL_PROMISC_MULTI:
705 		dsp->ds_promisc |= DLS_PROMISC_MULTI;
706 		break;
707 
708 	case DL_PROMISC_PHYS:
709 		dsp->ds_promisc |= DLS_PROMISC_PHYS;
710 		break;
711 
712 	default:
713 		dl_err = DL_NOTSUPPORTED;
714 		goto failed;
715 	}
716 
717 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
718 	    !dls_active_set(dsp->ds_dc)) {
719 		dsp->ds_promisc = promisc_saved;
720 		dl_err = DL_SYSERR;
721 		err = EBUSY;
722 		goto failed;
723 	}
724 
725 	/*
726 	 * Adjust channel promiscuity.
727 	 */
728 	err = dls_promisc(dsp->ds_dc, dsp->ds_promisc);
729 	if (err != 0) {
730 		dl_err = DL_SYSERR;
731 		dsp->ds_promisc = promisc_saved;
732 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
733 			dls_active_clear(dsp->ds_dc);
734 
735 		goto failed;
736 	}
737 
738 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
739 		dsp->ds_passivestate = DLD_ACTIVE;
740 
741 	rw_exit(&dsp->ds_lock);
742 	dlokack(q, mp, DL_PROMISCON_REQ);
743 	return (B_TRUE);
744 failed:
745 	rw_exit(&dsp->ds_lock);
746 	dlerrorack(q, mp, DL_PROMISCON_REQ, dl_err, (t_uscalar_t)err);
747 	return (B_FALSE);
748 }
749 
750 /*
751  * DL_PROMISCOFF_REQ
752  */
753 static boolean_t
754 proto_promiscoff_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
755 {
756 	dl_promiscoff_req_t *dlp = (dl_promiscoff_req_t *)udlp;
757 	int		err = 0;
758 	t_uscalar_t	dl_err;
759 	uint32_t	promisc_saved;
760 	queue_t		*q = dsp->ds_wq;
761 
762 	rw_enter(&dsp->ds_lock, RW_WRITER);
763 
764 	if (MBLKL(mp) < sizeof (dl_promiscoff_req_t)) {
765 		dl_err = DL_BADPRIM;
766 		goto failed;
767 	}
768 
769 	if (dsp->ds_dlstate == DL_UNATTACHED ||
770 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
771 		dl_err = DL_OUTSTATE;
772 		goto failed;
773 	}
774 
775 	promisc_saved = dsp->ds_promisc;
776 	switch (dlp->dl_level) {
777 	case DL_PROMISC_SAP:
778 		if (!(dsp->ds_promisc & DLS_PROMISC_SAP)) {
779 			dl_err = DL_NOTENAB;
780 			goto failed;
781 		}
782 		dsp->ds_promisc &= ~DLS_PROMISC_SAP;
783 		break;
784 
785 	case DL_PROMISC_MULTI:
786 		if (!(dsp->ds_promisc & DLS_PROMISC_MULTI)) {
787 			dl_err = DL_NOTENAB;
788 			goto failed;
789 		}
790 		dsp->ds_promisc &= ~DLS_PROMISC_MULTI;
791 		break;
792 
793 	case DL_PROMISC_PHYS:
794 		if (!(dsp->ds_promisc & DLS_PROMISC_PHYS)) {
795 			dl_err = DL_NOTENAB;
796 			goto failed;
797 		}
798 		dsp->ds_promisc &= ~DLS_PROMISC_PHYS;
799 		break;
800 
801 	default:
802 		dl_err = DL_NOTSUPPORTED;
803 		goto failed;
804 	}
805 
806 	/*
807 	 * Adjust channel promiscuity.
808 	 */
809 	err = dls_promisc(dsp->ds_dc, dsp->ds_promisc);
810 	if (err != 0) {
811 		dsp->ds_promisc = promisc_saved;
812 		dl_err = DL_SYSERR;
813 		goto failed;
814 	}
815 
816 	rw_exit(&dsp->ds_lock);
817 	dlokack(q, mp, DL_PROMISCOFF_REQ);
818 	return (B_TRUE);
819 failed:
820 	rw_exit(&dsp->ds_lock);
821 	dlerrorack(q, mp, DL_PROMISCOFF_REQ, dl_err, (t_uscalar_t)err);
822 	return (B_FALSE);
823 }
824 
825 /*
826  * DL_ENABMULTI_REQ
827  */
828 static boolean_t
829 proto_enabmulti_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
830 {
831 	dl_enabmulti_req_t *dlp = (dl_enabmulti_req_t *)udlp;
832 	int		err = 0;
833 	t_uscalar_t	dl_err;
834 	queue_t		*q = dsp->ds_wq;
835 
836 	rw_enter(&dsp->ds_lock, RW_WRITER);
837 
838 	if (dsp->ds_dlstate == DL_UNATTACHED ||
839 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
840 		dl_err = DL_OUTSTATE;
841 		goto failed;
842 	}
843 
844 	if (MBLKL(mp) < sizeof (dl_enabmulti_req_t) ||
845 	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
846 	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
847 		dl_err = DL_BADPRIM;
848 		goto failed;
849 	}
850 
851 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
852 	    !dls_active_set(dsp->ds_dc)) {
853 		dl_err = DL_SYSERR;
854 		err = EBUSY;
855 		goto failed;
856 	}
857 
858 	err = dls_multicst_add(dsp->ds_dc, mp->b_rptr + dlp->dl_addr_offset);
859 	if (err != 0) {
860 		switch (err) {
861 		case EINVAL:
862 			dl_err = DL_BADADDR;
863 			err = 0;
864 			break;
865 		case ENOSPC:
866 			dl_err = DL_TOOMANY;
867 			err = 0;
868 			break;
869 		default:
870 			dl_err = DL_SYSERR;
871 			break;
872 		}
873 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
874 			dls_active_clear(dsp->ds_dc);
875 
876 		goto failed;
877 	}
878 
879 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
880 		dsp->ds_passivestate = DLD_ACTIVE;
881 
882 	rw_exit(&dsp->ds_lock);
883 	dlokack(q, mp, DL_ENABMULTI_REQ);
884 	return (B_TRUE);
885 failed:
886 	rw_exit(&dsp->ds_lock);
887 	dlerrorack(q, mp, DL_ENABMULTI_REQ, dl_err, (t_uscalar_t)err);
888 	return (B_FALSE);
889 }
890 
891 /*
892  * DL_DISABMULTI_REQ
893  */
894 static boolean_t
895 proto_disabmulti_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
896 {
897 	dl_disabmulti_req_t *dlp = (dl_disabmulti_req_t *)udlp;
898 	int		err = 0;
899 	t_uscalar_t	dl_err;
900 	queue_t		*q = dsp->ds_wq;
901 
902 	rw_enter(&dsp->ds_lock, RW_READER);
903 
904 	if (dsp->ds_dlstate == DL_UNATTACHED ||
905 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
906 		dl_err = DL_OUTSTATE;
907 		goto failed;
908 	}
909 
910 	if (MBLKL(mp) < sizeof (dl_disabmulti_req_t) ||
911 	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
912 	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
913 		dl_err = DL_BADPRIM;
914 		goto failed;
915 	}
916 
917 	err = dls_multicst_remove(dsp->ds_dc, mp->b_rptr + dlp->dl_addr_offset);
918 	if (err != 0) {
919 		switch (err) {
920 		case EINVAL:
921 			dl_err = DL_BADADDR;
922 			err = 0;
923 			break;
924 
925 		case ENOENT:
926 			dl_err = DL_NOTENAB;
927 			err = 0;
928 			break;
929 
930 		default:
931 			dl_err = DL_SYSERR;
932 			break;
933 		}
934 		goto failed;
935 	}
936 
937 	rw_exit(&dsp->ds_lock);
938 	dlokack(q, mp, DL_DISABMULTI_REQ);
939 	return (B_TRUE);
940 failed:
941 	rw_exit(&dsp->ds_lock);
942 	dlerrorack(q, mp, DL_DISABMULTI_REQ, dl_err, (t_uscalar_t)err);
943 	return (B_FALSE);
944 }
945 
946 /*
947  * DL_PHYS_ADDR_REQ
948  */
949 static boolean_t
950 proto_physaddr_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
951 {
952 	dl_phys_addr_req_t *dlp = (dl_phys_addr_req_t *)udlp;
953 	queue_t		*q = dsp->ds_wq;
954 	t_uscalar_t	dl_err;
955 	char		*addr;
956 	uint_t		addr_length;
957 
958 	rw_enter(&dsp->ds_lock, RW_READER);
959 
960 	if (MBLKL(mp) < sizeof (dl_phys_addr_req_t)) {
961 		dl_err = DL_BADPRIM;
962 		goto failed;
963 	}
964 
965 	if (dsp->ds_dlstate == DL_UNATTACHED ||
966 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
967 		dl_err = DL_OUTSTATE;
968 		goto failed;
969 	}
970 
971 	if (dlp->dl_addr_type != DL_CURR_PHYS_ADDR &&
972 	    dlp->dl_addr_type != DL_FACT_PHYS_ADDR) {
973 		dl_err = DL_UNSUPPORTED;
974 		goto failed;
975 	}
976 
977 	addr_length = dsp->ds_mip->mi_addr_length;
978 	addr = kmem_alloc(addr_length, KM_NOSLEEP);
979 	if (addr == NULL) {
980 		rw_exit(&dsp->ds_lock);
981 		merror(q, mp, ENOSR);
982 		return (B_FALSE);
983 	}
984 
985 	/*
986 	 * Copy out the address before we drop the lock; we don't
987 	 * want to call dlphysaddrack() while holding ds_lock.
988 	 */
989 	bcopy((dlp->dl_addr_type == DL_CURR_PHYS_ADDR) ?
990 	    dsp->ds_curr_addr : dsp->ds_fact_addr, addr, addr_length);
991 
992 	rw_exit(&dsp->ds_lock);
993 	dlphysaddrack(q, mp, addr, (t_uscalar_t)addr_length);
994 	kmem_free(addr, addr_length);
995 	return (B_TRUE);
996 failed:
997 	rw_exit(&dsp->ds_lock);
998 	dlerrorack(q, mp, DL_PHYS_ADDR_REQ, dl_err, 0);
999 	return (B_FALSE);
1000 }
1001 
1002 /*
1003  * DL_SET_PHYS_ADDR_REQ
1004  */
1005 static boolean_t
1006 proto_setphysaddr_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1007 {
1008 	dl_set_phys_addr_req_t *dlp = (dl_set_phys_addr_req_t *)udlp;
1009 	int		err = 0;
1010 	t_uscalar_t	dl_err;
1011 	queue_t		*q = dsp->ds_wq;
1012 
1013 	rw_enter(&dsp->ds_lock, RW_WRITER);
1014 
1015 	if (dsp->ds_dlstate == DL_UNATTACHED ||
1016 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
1017 		dl_err = DL_OUTSTATE;
1018 		goto failed;
1019 	}
1020 
1021 	if (MBLKL(mp) < sizeof (dl_set_phys_addr_req_t) ||
1022 	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
1023 	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
1024 		dl_err = DL_BADPRIM;
1025 		goto failed;
1026 	}
1027 
1028 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
1029 	    !dls_active_set(dsp->ds_dc)) {
1030 		dl_err = DL_SYSERR;
1031 		err = EBUSY;
1032 		goto failed;
1033 	}
1034 
1035 	err = mac_unicst_set(dsp->ds_mh, mp->b_rptr + dlp->dl_addr_offset);
1036 	if (err != 0) {
1037 		switch (err) {
1038 		case EINVAL:
1039 			dl_err = DL_BADADDR;
1040 			err = 0;
1041 			break;
1042 
1043 		default:
1044 			dl_err = DL_SYSERR;
1045 			break;
1046 		}
1047 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
1048 			dls_active_clear(dsp->ds_dc);
1049 
1050 		goto failed;
1051 	}
1052 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
1053 		dsp->ds_passivestate = DLD_ACTIVE;
1054 
1055 	rw_exit(&dsp->ds_lock);
1056 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
1057 	return (B_TRUE);
1058 failed:
1059 	rw_exit(&dsp->ds_lock);
1060 	dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, dl_err, (t_uscalar_t)err);
1061 	return (B_FALSE);
1062 }
1063 
1064 /*
1065  * DL_UDQOS_REQ
1066  */
1067 static boolean_t
1068 proto_udqos_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1069 {
1070 	dl_udqos_req_t *dlp = (dl_udqos_req_t *)udlp;
1071 	dl_qos_cl_sel1_t *selp;
1072 	int		off, len;
1073 	t_uscalar_t	dl_err;
1074 	queue_t		*q = dsp->ds_wq;
1075 
1076 	off = dlp->dl_qos_offset;
1077 	len = dlp->dl_qos_length;
1078 
1079 	rw_enter(&dsp->ds_lock, RW_WRITER);
1080 
1081 	if (MBLKL(mp) < sizeof (dl_udqos_req_t) || !MBLKIN(mp, off, len)) {
1082 		dl_err = DL_BADPRIM;
1083 		goto failed;
1084 	}
1085 
1086 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
1087 	if (selp->dl_qos_type != DL_QOS_CL_SEL1) {
1088 		dl_err = DL_BADQOSTYPE;
1089 		goto failed;
1090 	}
1091 
1092 	if (selp->dl_priority > (1 << VLAN_PRI_SIZE) - 1 ||
1093 	    selp->dl_priority < 0) {
1094 		dl_err = DL_BADQOSPARAM;
1095 		goto failed;
1096 	}
1097 
1098 	dsp->ds_pri = selp->dl_priority;
1099 
1100 	rw_exit(&dsp->ds_lock);
1101 	dlokack(q, mp, DL_UDQOS_REQ);
1102 	return (B_TRUE);
1103 failed:
1104 	rw_exit(&dsp->ds_lock);
1105 	dlerrorack(q, mp, DL_UDQOS_REQ, dl_err, 0);
1106 	return (B_FALSE);
1107 }
1108 
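/*
 * Return B_TRUE if the module immediately above this stream is IP.  The
 * polling and soft ring capabilities are only enabled when IP is the
 * direct consumer.
 */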
1109 static boolean_t
1110 check_ip_above(queue_t *q)
1111 {
1112 	queue_t		*next_q;
1113 	boolean_t	ret = B_TRUE;
1114 
1115 	claimstr(q);
1116 	next_q = q->q_next;
1117 	if (strcmp(next_q->q_qinfo->qi_minfo->mi_idname, "ip") != 0)
1118 		ret = B_FALSE;
1119 	releasestr(q);
1120 	return (ret);
1121 }
1122 
1123 /*
1124  * DL_CAPABILITY_REQ
1125  */
1126 /*ARGSUSED*/
1127 static boolean_t
1128 proto_capability_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1129 {
1130 	dl_capability_req_t *dlp = (dl_capability_req_t *)udlp;
1131 	dl_capability_sub_t *sp;
1132 	size_t		size, len;
1133 	offset_t	off, end;
1134 	t_uscalar_t	dl_err;
1135 	queue_t		*q = dsp->ds_wq;
1136 	boolean_t	upgraded;
1137 
1138 	rw_enter(&dsp->ds_lock, RW_READER);
1139 
1140 	if (MBLKL(mp) < sizeof (dl_capability_req_t)) {
1141 		dl_err = DL_BADPRIM;
1142 		goto failed;
1143 	}
1144 
1145 	if (dsp->ds_dlstate == DL_UNATTACHED ||
1146 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
1147 		dl_err = DL_OUTSTATE;
1148 		goto failed;
1149 	}
1150 
1151 	/*
1152 	 * This request is overloaded. If there are no requested capabilities
1153 	 * then we just want to acknowledge with all the capabilities we
1154 	 * support. Otherwise we enable the set of capabilities requested.
1155 	 */
1156 	if (dlp->dl_sub_length == 0) {
1157 		/* callee drops lock */
1158 		return (proto_capability_advertise(dsp, mp));
1159 	}
1160 
1161 	if (!MBLKIN(mp, dlp->dl_sub_offset, dlp->dl_sub_length)) {
1162 		dl_err = DL_BADPRIM;
1163 		goto failed;
1164 	}
1165 
1166 	dlp->dl_primitive = DL_CAPABILITY_ACK;
1167 
1168 	off = dlp->dl_sub_offset;
1169 	len = dlp->dl_sub_length;
1170 
1171 	/*
1172 	 * Walk the list of capabilities to be enabled.
1173 	 */
1174 	upgraded = B_FALSE;
1175 	for (end = off + len; off < end; ) {
1176 		sp = (dl_capability_sub_t *)(mp->b_rptr + off);
1177 		size = sizeof (dl_capability_sub_t) + sp->dl_length;
1178 
1179 		if (off + size > end ||
1180 		    !IS_P2ALIGNED(off, sizeof (uint32_t))) {
1181 			dl_err = DL_BADPRIM;
1182 			goto failed;
1183 		}
1184 
1185 		switch (sp->dl_cap) {
1186 		/*
1187 		 * TCP/IP checksum offload to hardware.
1188 		 */
1189 		case DL_CAPAB_HCKSUM: {
1190 			dl_capab_hcksum_t *hcksump;
1191 			dl_capab_hcksum_t hcksum;
1192 
1193 			hcksump = (dl_capab_hcksum_t *)&sp[1];
1194 			/*
1195 			 * Copy for alignment.
1196 			 */
1197 			bcopy(hcksump, &hcksum, sizeof (dl_capab_hcksum_t));
1198 			dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
1199 			bcopy(&hcksum, hcksump, sizeof (dl_capab_hcksum_t));
1200 			break;
1201 		}
1202 
1203 		/*
1204 		 * IP polling interface.
1205 		 */
1206 		case DL_CAPAB_POLL: {
1207 			dl_capab_dls_t *pollp;
1208 			dl_capab_dls_t	poll;
1209 
1210 			pollp = (dl_capab_dls_t *)&sp[1];
1211 			/*
1212 			 * Copy for alignment.
1213 			 */
1214 			bcopy(pollp, &poll, sizeof (dl_capab_dls_t));
1215 
1216 			/*
1217 			 * We need to become writer before enabling and/or
1218 			 * disabling the polling interface.  If we couldn't
1219 			 * upgrade, check state again after re-acquiring the
1220 			 * lock to make sure we can proceed.
1221 			 */
1222 			if (!upgraded && !rw_tryupgrade(&dsp->ds_lock)) {
1223 				rw_exit(&dsp->ds_lock);
1224 				rw_enter(&dsp->ds_lock, RW_WRITER);
1225 
1226 				if (dsp->ds_dlstate == DL_UNATTACHED ||
1227 				    DL_ACK_PENDING(dsp->ds_dlstate)) {
1228 					dl_err = DL_OUTSTATE;
1229 					goto failed;
1230 				}
1231 			}
1232 			upgraded = B_TRUE;
1233 
1234 			switch (poll.dls_flags) {
1235 			default:
1236 				/*FALLTHRU*/
1237 			case POLL_DISABLE:
1238 				proto_poll_disable(dsp);
1239 				break;
1240 
1241 			case POLL_ENABLE:
1242 				ASSERT(!(dld_opt & DLD_OPT_NO_POLL));
1243 
1244 				/*
1245 				 * Make sure polling is disabled.
1246 				 */
1247 				proto_poll_disable(dsp);
1248 
1249 				/*
1250 				 * Now attempt to enable it.
1251 				 */
1252 				if (check_ip_above(dsp->ds_rq) &&
1253 				    proto_poll_enable(dsp, &poll)) {
1254 					bzero(&poll, sizeof (dl_capab_dls_t));
1255 					poll.dls_flags = POLL_ENABLE;
1256 				}
1257 				break;
1258 			}
1259 
1260 			dlcapabsetqid(&(poll.dls_mid), dsp->ds_rq);
1261 			bcopy(&poll, pollp, sizeof (dl_capab_dls_t));
1262 			break;
1263 		}
1264 		case DL_CAPAB_SOFT_RING: {
1265 			dl_capab_dls_t *soft_ringp;
1266 			dl_capab_dls_t soft_ring;
1267 
1268 			soft_ringp = (dl_capab_dls_t *)&sp[1];
1269 			/*
1270 			 * Copy for alignment.
1271 			 */
1272 			bcopy(soft_ringp, &soft_ring,
1273 			    sizeof (dl_capab_dls_t));
1274 
1275 			/*
1276 			 * We need to become writer before enabling and/or
1277 			 * disabling the soft_ring interface.  If we couldn't
1278 			 * upgrade, check state again after re-acquiring the
1279 			 * lock to make sure we can proceed.
1280 			 */
1281 			if (!upgraded && !rw_tryupgrade(&dsp->ds_lock)) {
1282 				rw_exit(&dsp->ds_lock);
1283 				rw_enter(&dsp->ds_lock, RW_WRITER);
1284 
1285 				if (dsp->ds_dlstate == DL_UNATTACHED ||
1286 				    DL_ACK_PENDING(dsp->ds_dlstate)) {
1287 					dl_err = DL_OUTSTATE;
1288 					goto failed;
1289 				}
1290 			}
1291 			upgraded = B_TRUE;
1292 
1293 			switch (soft_ring.dls_flags) {
1294 			default:
1295 				/*FALLTHRU*/
1296 			case SOFT_RING_DISABLE:
1297 				proto_soft_ring_disable(dsp);
1298 				break;
1299 
1300 			case SOFT_RING_ENABLE:
1301 				/*
1302 				 * Make sure soft_ring is disabled.
1303 				 */
1304 				proto_soft_ring_disable(dsp);
1305 
1306 				/*
1307 				 * Now attempt to enable it.
1308 				 */
1309 				if (check_ip_above(dsp->ds_rq) &&
1310 				    proto_soft_ring_enable(dsp, &soft_ring)) {
1311 					bzero(&soft_ring,
1312 					    sizeof (dl_capab_dls_t));
1313 					soft_ring.dls_flags =
1314 					    SOFT_RING_ENABLE;
1315 				} else {
1316 					bzero(&soft_ring,
1317 					    sizeof (dl_capab_dls_t));
1318 					soft_ring.dls_flags =
1319 					    SOFT_RING_DISABLE;
1320 				}
1321 				break;
1322 			}
1323 
1324 			dlcapabsetqid(&(soft_ring.dls_mid), dsp->ds_rq);
1325 			bcopy(&soft_ring, soft_ringp,
1326 			    sizeof (dl_capab_dls_t));
1327 			break;
1328 		}
1329 		default:
1330 			break;
1331 		}
1332 
1333 		off += size;
1334 	}
1335 	rw_exit(&dsp->ds_lock);
1336 	qreply(q, mp);
1337 	return (B_TRUE);
1338 failed:
1339 	rw_exit(&dsp->ds_lock);
1340 	dlerrorack(q, mp, DL_CAPABILITY_REQ, dl_err, 0);
1341 	return (B_FALSE);
1342 }
1343 
1344 /*
1345  * DL_NOTIFY_REQ
1346  */
1347 static boolean_t
1348 proto_notify_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1349 {
1350 	dl_notify_req_t	*dlp = (dl_notify_req_t *)udlp;
1351 	t_uscalar_t	dl_err;
1352 	queue_t		*q = dsp->ds_wq;
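	/* All of the notifications this driver is able to generate. */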
1353 	uint_t		note =
1354 	    DL_NOTE_PROMISC_ON_PHYS |
1355 	    DL_NOTE_PROMISC_OFF_PHYS |
1356 	    DL_NOTE_PHYS_ADDR |
1357 	    DL_NOTE_LINK_UP |
1358 	    DL_NOTE_LINK_DOWN |
1359 	    DL_NOTE_CAPAB_RENEG |
1360 	    DL_NOTE_SPEED;
1361 
1362 	rw_enter(&dsp->ds_lock, RW_WRITER);
1363 
1364 	if (MBLKL(mp) < sizeof (dl_notify_req_t)) {
1365 		dl_err = DL_BADPRIM;
1366 		goto failed;
1367 	}
1368 
1369 	if (dsp->ds_dlstate == DL_UNATTACHED ||
1370 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
1371 		dl_err = DL_OUTSTATE;
1372 		goto failed;
1373 	}
1374 
1375 	/*
1376 	 * Cache the notifications that are being enabled.
1377 	 */
1378 	dsp->ds_notifications = dlp->dl_notifications & note;
1379 	rw_exit(&dsp->ds_lock);
1380 	/*
1381 	 * The ACK carries all notifications regardless of which set is
1382 	 * being enabled.
1383 	 */
1384 	dlnotifyack(q, mp, note);
1385 
1386 	/*
1387 	 * Solicit DL_NOTIFY_IND messages for each enabled notification.
1388 	 */
1389 	rw_enter(&dsp->ds_lock, RW_READER);
1390 	if (dsp->ds_notifications != 0) {
1391 		rw_exit(&dsp->ds_lock);
1392 		dld_str_notify_ind(dsp);
1393 	} else {
1394 		rw_exit(&dsp->ds_lock);
1395 	}
1396 	return (B_TRUE);
1397 failed:
1398 	rw_exit(&dsp->ds_lock);
1399 	dlerrorack(q, mp, DL_NOTIFY_REQ, dl_err, 0);
1400 	return (B_FALSE);
1401 }
1402 
1403 /*
1404  * DL_UNITDATA_REQ
1405  */
1406 static boolean_t
1407 proto_unitdata_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1408 {
1409 	queue_t			*q = dsp->ds_wq;
1410 	dl_unitdata_req_t	*dlp = (dl_unitdata_req_t *)udlp;
1411 	off_t			off;
1412 	size_t			len, size;
1413 	const uint8_t		*addr;
1414 	uint16_t		sap;
1415 	uint_t			addr_length;
1416 	mblk_t			*bp, *payload;
1417 	uint32_t		start, stuff, end, value, flags;
1418 	t_uscalar_t		dl_err;
1419 
1420 	rw_enter(&dsp->ds_lock, RW_READER);
1421 
1422 	if (MBLKL(mp) < sizeof (dl_unitdata_req_t) || mp->b_cont == NULL) {
1423 		dl_err = DL_BADPRIM;
1424 		goto failed;
1425 	}
1426 
1427 	if (dsp->ds_dlstate != DL_IDLE) {
1428 		dl_err = DL_OUTSTATE;
1429 		goto failed;
1430 	}
1431 	addr_length = dsp->ds_mip->mi_addr_length;
1432 
1433 	off = dlp->dl_dest_addr_offset;
1434 	len = dlp->dl_dest_addr_length;
1435 
1436 	if (!MBLKIN(mp, off, len) || !IS_P2ALIGNED(off, sizeof (uint16_t))) {
1437 		dl_err = DL_BADPRIM;
1438 		goto failed;
1439 	}
1440 
1441 	if (len != addr_length + sizeof (uint16_t)) {
1442 		dl_err = DL_BADADDR;
1443 		goto failed;
1444 	}
1445 
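	/*
	 * The DLSAP address is the MAC address followed by a 16-bit SAP.
	 */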
1446 	addr = mp->b_rptr + off;
1447 	sap = *(uint16_t *)(mp->b_rptr + off + addr_length);
1448 
1449 	/*
1450 	 * Check the length of the packet and the block types.
1451 	 */
1452 	size = 0;
1453 	payload = mp->b_cont;
1454 	for (bp = payload; bp != NULL; bp = bp->b_cont) {
1455 		if (DB_TYPE(bp) != M_DATA)
1456 			goto baddata;
1457 
1458 		size += MBLKL(bp);
1459 	}
1460 
1461 	if (size > dsp->ds_mip->mi_sdu_max)
1462 		goto baddata;
1463 
1464 	/*
1465 	 * Build a packet header.
1466 	 */
1467 	if ((bp = dls_header(dsp->ds_dc, addr, sap, dlp->dl_priority.dl_max,
1468 	    &payload)) == NULL) {
1469 		dl_err = DL_BADADDR;
1470 		goto failed;
1471 	}
1472 
1473 	/*
1474 	 * We no longer need the M_PROTO header, so free it.
1475 	 */
1476 	freeb(mp);
1477 
1478 	/*
1479 	 * Transfer the checksum offload information if it is present.
1480 	 */
1481 	hcksum_retrieve(payload, NULL, NULL, &start, &stuff, &end, &value,
1482 	    &flags);
1483 	(void) hcksum_assoc(bp, NULL, NULL, start, stuff, end, value, flags, 0);
1484 
1485 	/*
1486 	 * Link the payload onto the new header.
1487 	 */
1488 	ASSERT(bp->b_cont == NULL);
1489 	bp->b_cont = payload;
1490 
1491 	dld_tx_single(dsp, bp);
1492 	rw_exit(&dsp->ds_lock);
1493 	return (B_TRUE);
1494 failed:
1495 	rw_exit(&dsp->ds_lock);
1496 	dlerrorack(q, mp, DL_UNITDATA_REQ, dl_err, 0);
1497 	return (B_FALSE);
1498 
1499 baddata:
1500 	rw_exit(&dsp->ds_lock);
1501 	dluderrorind(q, mp, (void *)addr, len, DL_BADDATA, 0);
1502 	return (B_FALSE);
1503 }
1504 
1505 /*
1506  * DL_PASSIVE_REQ
1507  */
1508 /* ARGSUSED */
1509 static boolean_t
1510 proto_passive_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1511 {
1512 	t_uscalar_t dl_err;
1513 
1514 	rw_enter(&dsp->ds_lock, RW_WRITER);
1515 	/*
1516 	 * If we've already become active by issuing an active primitive,
1517 	 * then it's too late to try to become passive.
1518 	 */
1519 	if (dsp->ds_passivestate == DLD_ACTIVE) {
1520 		dl_err = DL_OUTSTATE;
1521 		goto failed;
1522 	}
1523 
1524 	if (MBLKL(mp) < sizeof (dl_passive_req_t)) {
1525 		dl_err = DL_BADPRIM;
1526 		goto failed;
1527 	}
1528 
1529 	dsp->ds_passivestate = DLD_PASSIVE;
1530 	rw_exit(&dsp->ds_lock);
1531 	dlokack(dsp->ds_wq, mp, DL_PASSIVE_REQ);
1532 	return (B_TRUE);
1533 failed:
1534 	rw_exit(&dsp->ds_lock);
1535 	dlerrorack(dsp->ds_wq, mp, DL_PASSIVE_REQ, dl_err, 0);
1536 	return (B_FALSE);
1537 }
1538 
1539 
1540 /*
1541  * Catch-all handler.
1542  */
1543 static boolean_t
1544 proto_req(dld_str_t *dsp, union DL_primitives *dlp, mblk_t *mp)
1545 {
1546 	dlerrorack(dsp->ds_wq, mp, dlp->dl_primitive, DL_UNSUPPORTED, 0);
1547 	return (B_FALSE);
1548 }
1549 
1550 static void
1551 proto_poll_disable(dld_str_t *dsp)
1552 {
1553 	mac_handle_t	mh;
1554 
1555 	ASSERT(dsp->ds_pending_req != NULL || RW_WRITE_HELD(&dsp->ds_lock));
1556 
1557 	if (!dsp->ds_polling)
1558 		return;
1559 
1560 	/*
1561 	 * It should be impossible to enable raw mode if polling is turned on.
1562 	 */
1563 	ASSERT(dsp->ds_mode != DLD_RAW);
1564 
1565 	/*
1566 	 * Reset the resource_add callback.
1567 	 */
1568 	mh = dls_mac(dsp->ds_dc);
1569 	mac_resource_set(mh, NULL, NULL);
1570 	mac_resources(mh);
1571 
1572 	/*
1573 	 * Set receive function back to default.
1574 	 */
1575 	dls_rx_set(dsp->ds_dc, (dsp->ds_mode == DLD_FASTPATH) ?
1576 	    dld_str_rx_fastpath : dld_str_rx_unitdata, (void *)dsp);
1577 
1578 	/*
1579 	 * Note that polling is disabled.
1580 	 */
1581 	dsp->ds_polling = B_FALSE;
1582 }
1583 
1584 static boolean_t
1585 proto_poll_enable(dld_str_t *dsp, dl_capab_dls_t *pollp)
1586 {
1587 	mac_handle_t	mh;
1588 
1589 	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
1590 	ASSERT(!dsp->ds_polling);
1591 
1592 	/*
1593 	 * We cannot enable polling if raw mode
1594 	 * has been enabled.
1595 	 */
1596 	if (dsp->ds_mode == DLD_RAW)
1597 		return (B_FALSE);
1598 
1599 	mh = dls_mac(dsp->ds_dc);
1600 
1601 	/*
1602 	 * Register resources.
1603 	 */
1604 	mac_resource_set(mh, (mac_resource_add_t)pollp->dls_ring_add,
1605 	    (void *)pollp->dls_rx_handle);
1606 	mac_resources(mh);
1607 
1608 	/*
1609 	 * Set the receive function.
1610 	 */
1611 	dls_rx_set(dsp->ds_dc, (dls_rx_t)pollp->dls_rx,
1612 	    (void *)pollp->dls_rx_handle);
1613 
1614 	/*
1615 	 * Note that polling is enabled. This prevents further DLIOCHDRINFO
1616 	 * ioctls from overwriting the receive function pointer.
1617 	 */
1618 	dsp->ds_polling = B_TRUE;
1619 	return (B_TRUE);
1620 }
1621 
1622 static void
1623 proto_soft_ring_disable(dld_str_t *dsp)
1624 {
1625 	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
1626 
1627 	if (!dsp->ds_soft_ring)
1628 		return;
1629 
1630 	/*
1631 	 * It should be impossible to enable raw mode if soft_ring is turned on.
1632 	 */
1633 	ASSERT(dsp->ds_mode != DLD_RAW);
1634 	proto_change_soft_ring_fanout(dsp, SOFT_RING_NONE);
1635 	/*
1636 	 * Note that fanout is disabled.
1637 	 */
1638 	dsp->ds_soft_ring = B_FALSE;
1639 }
1640 
1641 static boolean_t
1642 proto_soft_ring_enable(dld_str_t *dsp, dl_capab_dls_t *soft_ringp)
1643 {
1644 	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
1645 	ASSERT(!dsp->ds_soft_ring);
1646 
1647 	/*
1648 	 * We cannot enable soft_ring if raw mode
1649 	 * has been enabled.
1650 	 */
1651 	if (dsp->ds_mode == DLD_RAW)
1652 		return (B_FALSE);
1653 
1654 	if (dls_soft_ring_enable(dsp->ds_dc, soft_ringp) == B_FALSE)
1655 		return (B_FALSE);
1656 
1657 	dsp->ds_soft_ring = B_TRUE;
1658 	return (B_TRUE);
1659 }
1660 
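/*
 * Switch the soft ring receive path.  SOFT_RING_NONE restores the
 * default receive function (fastpath or unitdata); any other fanout
 * type directs inbound packets through dls_ether_soft_ring_fanout().
 */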
1661 static void
1662 proto_change_soft_ring_fanout(dld_str_t *dsp, int type)
1663 {
1664 	dls_rx_t	rx;
1665 
1666 	if (type == SOFT_RING_NONE) {
1667 		rx = (dsp->ds_mode == DLD_FASTPATH) ?
1668 		    dld_str_rx_fastpath : dld_str_rx_unitdata;
1669 	} else {
1670 		rx = (dls_rx_t)dls_ether_soft_ring_fanout;
1671 	}
1672 	dls_soft_ring_rx_set(dsp->ds_dc, rx, dsp, type);
1673 }
1674 
1675 /*
1676  * DL_CAPABILITY_ACK/DL_ERROR_ACK
1677  */
1678 static boolean_t
1679 proto_capability_advertise(dld_str_t *dsp, mblk_t *mp)
1680 {
1681 	dl_capability_ack_t	*dlap;
1682 	dl_capability_sub_t	*dlsp;
1683 	size_t			subsize;
1684 	dl_capab_dls_t		poll;
1685 	dl_capab_dls_t		soft_ring;
1686 	dl_capab_hcksum_t	hcksum;
1687 	dl_capab_zerocopy_t	zcopy;
1688 	uint8_t			*ptr;
1689 	boolean_t		cksum_cap;
1690 	boolean_t		poll_cap;
1691 	queue_t			*q = dsp->ds_wq;
1692 	mblk_t			*mp1;
1693 
1694 	ASSERT(RW_READ_HELD(&dsp->ds_lock));
1695 
1696 	/*
1697 	 * Initially assume no capabilities.
1698 	 */
1699 	subsize = 0;
1700 
1701 	/*
1702 	 * Advertise the soft ring capability for GLDv3 drivers, but only
1703 	 * when the stream is not on a VLAN (VLAN_ID_NONE).
1704 	 */
1705 	if (dsp->ds_vid == VLAN_ID_NONE)
1706 		subsize += sizeof (dl_capability_sub_t) +
1707 		    sizeof (dl_capab_dls_t);
1708 
1709 	/*
1710 	 * Check if polling can be enabled on this interface.
1711 	 * If advertising DL_CAPAB_POLL has not been explicitly disabled
1712 	 * then reserve space for that capability.
1713 	 */
1714 	poll_cap = (mac_capab_get(dsp->ds_mh, MAC_CAPAB_POLL, NULL) &&
1715 	    !(dld_opt & DLD_OPT_NO_POLL) && (dsp->ds_vid == VLAN_ID_NONE));
1716 	if (poll_cap) {
1717 		subsize += sizeof (dl_capability_sub_t) +
1718 		    sizeof (dl_capab_dls_t);
1719 	}
1720 
1721 	/*
1722 	 * If the MAC interface supports checksum offload then reserve
1723 	 * space for the DL_CAPAB_HCKSUM capability.
1724 	 */
1725 	if (cksum_cap = mac_capab_get(dsp->ds_mh, MAC_CAPAB_HCKSUM,
1726 	    &hcksum.hcksum_txflags)) {
1727 		subsize += sizeof (dl_capability_sub_t) +
1728 		    sizeof (dl_capab_hcksum_t);
1729 	}
1730 
1731 	/*
1732 	 * If DL_CAPAB_ZEROCOPY has not been explicitly disabled then
1733 	 * reserve space for it.
1734 	 */
1735 	if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
1736 		subsize += sizeof (dl_capability_sub_t) +
1737 		    sizeof (dl_capab_zerocopy_t);
1738 	}
1739 
1740 	/*
1741 	 * If there are no capabilities to advertise or if we
1742 	 * can't allocate a response, send a DL_ERROR_ACK.
1743 	 */
1744 	if ((mp1 = reallocb(mp,
1745 	    sizeof (dl_capability_ack_t) + subsize, 0)) == NULL) {
1746 		rw_exit(&dsp->ds_lock);
1747 		dlerrorack(q, mp, DL_CAPABILITY_REQ, DL_NOTSUPPORTED, 0);
1748 		return (B_FALSE);
1749 	}
1750 
1751 	mp = mp1;
1752 	DB_TYPE(mp) = M_PROTO;
1753 	mp->b_wptr = mp->b_rptr + sizeof (dl_capability_ack_t) + subsize;
1754 	bzero(mp->b_rptr, MBLKL(mp));
1755 	dlap = (dl_capability_ack_t *)mp->b_rptr;
1756 	dlap->dl_primitive = DL_CAPABILITY_ACK;
1757 	dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
1758 	dlap->dl_sub_length = subsize;
1759 	ptr = (uint8_t *)&dlap[1];
1760 
1761 	/*
1762 	 * IP polling interface.
1763 	 */
1764 	if (poll_cap) {
1765 		/*
1766 		 * Attempt to disable just in case this is a re-negotiation;
1767 		 * we need to become writer before doing so.
1768 		 */
1769 		if (!rw_tryupgrade(&dsp->ds_lock)) {
1770 			rw_exit(&dsp->ds_lock);
1771 			rw_enter(&dsp->ds_lock, RW_WRITER);
1772 		}
1773 
1774 		/*
1775 		 * Check if polling state has changed after we re-acquired
1776 		 * the lock above, so that we don't mis-advertise it.
1777 		 */
1778 		poll_cap = !(dld_opt & DLD_OPT_NO_POLL) &&
1779 		    (dsp->ds_vid == VLAN_ID_NONE);
1780 
1781 		if (!poll_cap) {
1782 			int poll_capab_size;
1783 
1784 			rw_downgrade(&dsp->ds_lock);
1785 
1786 			poll_capab_size = sizeof (dl_capability_sub_t) +
1787 			    sizeof (dl_capab_dls_t);
1788 
1789 			mp->b_wptr -= poll_capab_size;
1790 			subsize -= poll_capab_size;
1791 			dlap->dl_sub_length = subsize;
1792 		} else {
1793 			proto_poll_disable(dsp);
1794 
1795 			rw_downgrade(&dsp->ds_lock);
1796 
1797 			dlsp = (dl_capability_sub_t *)ptr;
1798 
1799 			dlsp->dl_cap = DL_CAPAB_POLL;
1800 			dlsp->dl_length = sizeof (dl_capab_dls_t);
1801 			ptr += sizeof (dl_capability_sub_t);
1802 
1803 			bzero(&poll, sizeof (dl_capab_dls_t));
1804 			poll.dls_version = POLL_VERSION_1;
1805 			poll.dls_flags = POLL_CAPABLE;
1806 			poll.dls_tx_handle = (uintptr_t)dsp;
1807 			poll.dls_tx = (uintptr_t)str_mdata_fastpath_put;
1808 
1809 			dlcapabsetqid(&(poll.dls_mid), dsp->ds_rq);
1810 			bcopy(&poll, ptr, sizeof (dl_capab_dls_t));
1811 			ptr += sizeof (dl_capab_dls_t);
1812 		}
1813 	}
1814 
1815 	ASSERT(RW_READ_HELD(&dsp->ds_lock));
1816 
1817 	if (dsp->ds_vid == VLAN_ID_NONE) {
1818 		dlsp = (dl_capability_sub_t *)ptr;
1819 
1820 		dlsp->dl_cap = DL_CAPAB_SOFT_RING;
1821 		dlsp->dl_length = sizeof (dl_capab_dls_t);
1822 		ptr += sizeof (dl_capability_sub_t);
1823 
1824 		bzero(&soft_ring, sizeof (dl_capab_dls_t));
1825 		soft_ring.dls_version = SOFT_RING_VERSION_1;
1826 		soft_ring.dls_flags = SOFT_RING_CAPABLE;
1827 		soft_ring.dls_tx_handle = (uintptr_t)dsp;
1828 		soft_ring.dls_tx = (uintptr_t)str_mdata_fastpath_put;
1829 		soft_ring.dls_ring_change_status =
1830 		    (uintptr_t)proto_change_soft_ring_fanout;
1831 		soft_ring.dls_ring_bind = (uintptr_t)soft_ring_bind;
1832 		soft_ring.dls_ring_unbind = (uintptr_t)soft_ring_unbind;
1833 
1834 		dlcapabsetqid(&(soft_ring.dls_mid), dsp->ds_rq);
1835 		bcopy(&soft_ring, ptr, sizeof (dl_capab_dls_t));
1836 		ptr += sizeof (dl_capab_dls_t);
1837 	}
1838 
1839 	/*
1840 	 * TCP/IP checksum offload.
1841 	 */
1842 	if (cksum_cap) {
1843 		dlsp = (dl_capability_sub_t *)ptr;
1844 
1845 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
1846 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
1847 		ptr += sizeof (dl_capability_sub_t);
1848 
1849 		hcksum.hcksum_version = HCKSUM_VERSION_1;
1850 		dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
1851 		bcopy(&hcksum, ptr, sizeof (dl_capab_hcksum_t));
1852 		ptr += sizeof (dl_capab_hcksum_t);
1853 	}
1854 
1855 	/*
1856 	 * Zero copy
1857 	 */
1858 	if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
1859 		dlsp = (dl_capability_sub_t *)ptr;
1860 
1861 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
1862 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
1863 		ptr += sizeof (dl_capability_sub_t);
1864 
1865 		bzero(&zcopy, sizeof (dl_capab_zerocopy_t));
1866 		zcopy.zerocopy_version = ZEROCOPY_VERSION_1;
1867 		zcopy.zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
1868 
1869 		dlcapabsetqid(&(zcopy.zerocopy_mid), dsp->ds_rq);
1870 		bcopy(&zcopy, ptr, sizeof (dl_capab_zerocopy_t));
1871 		ptr += sizeof (dl_capab_zerocopy_t);
1872 	}
1873 
1874 	ASSERT(ptr == mp->b_rptr + sizeof (dl_capability_ack_t) + subsize);
1875 
1876 	rw_exit(&dsp->ds_lock);
1877 	qreply(q, mp);
1878 	return (B_TRUE);
1879 }
1880