xref: /titanic_50/usr/src/uts/common/io/dld/dld_proto.c (revision fb3fb4f3d76d55b64440afd0af72775dfad3bd1d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * Data-Link Driver
31  */
32 
33 #include <sys/types.h>
34 #include <sys/debug.h>
35 #include <sys/sysmacros.h>
36 #include <sys/stream.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/strsun.h>
40 #include <sys/cpuvar.h>
41 #include <sys/dlpi.h>
42 #include <netinet/in.h>
43 #include <sys/sdt.h>
44 #include <sys/strsubr.h>
45 #include <sys/vlan.h>
46 #include <sys/mac.h>
47 #include <sys/dls.h>
48 #include <sys/dld.h>
49 #include <sys/dld_impl.h>
50 #include <sys/dls_soft_ring.h>
51 
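/*
 * Common prototype shared by the DLPI request handlers below.  Each handler
 * takes the stream, the parsed primitive and the original message, and
 * returns B_TRUE if the request was handled successfully.
 */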
52 typedef boolean_t proto_reqfunc_t(dld_str_t *, union DL_primitives *, mblk_t *);
53 
54 static proto_reqfunc_t proto_info_req, proto_attach_req, proto_detach_req,
55     proto_bind_req, proto_unbind_req, proto_promiscon_req, proto_promiscoff_req,
56     proto_enabmulti_req, proto_disabmulti_req, proto_physaddr_req,
57     proto_setphysaddr_req, proto_udqos_req, proto_req, proto_capability_req,
58     proto_notify_req, proto_unitdata_req, proto_passive_req;
59 
60 static void proto_poll_disable(dld_str_t *);
61 static boolean_t proto_poll_enable(dld_str_t *, dl_capab_dls_t *);
62 static boolean_t proto_capability_advertise(dld_str_t *, mblk_t *);
63 
64 static task_func_t proto_process_unbind_req, proto_process_detach_req;
65 
66 static void proto_soft_ring_disable(dld_str_t *);
67 static boolean_t proto_soft_ring_enable(dld_str_t *, dl_capab_dls_t *);
69 static void proto_change_soft_ring_fanout(dld_str_t *, int);
70 
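/*
 * DL_ACK_PENDING() is true while an attach, detach, bind or unbind has been
 * accepted but not yet acknowledged upstream.
 */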
71 #define	DL_ACK_PENDING(state) \
72 	((state) == DL_ATTACH_PENDING || \
73 	(state) == DL_DETACH_PENDING || \
74 	(state) == DL_BIND_PENDING || \
75 	(state) == DL_UNBIND_PENDING)
76 
77 /*
78  * Process a DLPI protocol message.
79  * The primitives DL_BIND_REQ, DL_ENABMULTI_REQ, DL_PROMISCON_REQ,
80  * DL_SET_PHYS_ADDR_REQ put the data link below our dld_str_t into an
81  * 'active' state. The primitive DL_PASSIVE_REQ marks our dld_str_t
82  * as 'passive' and forbids it from being subsequently made 'active'
83  * by the above primitives.
84  */
85 void
86 dld_proto(dld_str_t *dsp, mblk_t *mp)
87 {
88 	union DL_primitives	*udlp;
89 	t_uscalar_t		prim;
90 
91 	if (MBLKL(mp) < sizeof (t_uscalar_t)) {
92 		freemsg(mp);
93 		return;
94 	}
95 
96 	udlp = (union DL_primitives *)mp->b_rptr;
97 	prim = udlp->dl_primitive;
98 
99 	switch (prim) {
100 	case DL_INFO_REQ:
101 		(void) proto_info_req(dsp, udlp, mp);
102 		break;
103 	case DL_BIND_REQ:
104 		(void) proto_bind_req(dsp, udlp, mp);
105 		break;
106 	case DL_UNBIND_REQ:
107 		(void) proto_unbind_req(dsp, udlp, mp);
108 		break;
109 	case DL_UNITDATA_REQ:
110 		(void) proto_unitdata_req(dsp, udlp, mp);
111 		break;
112 	case DL_UDQOS_REQ:
113 		(void) proto_udqos_req(dsp, udlp, mp);
114 		break;
115 	case DL_ATTACH_REQ:
116 		(void) proto_attach_req(dsp, udlp, mp);
117 		break;
118 	case DL_DETACH_REQ:
119 		(void) proto_detach_req(dsp, udlp, mp);
120 		break;
121 	case DL_ENABMULTI_REQ:
122 		(void) proto_enabmulti_req(dsp, udlp, mp);
123 		break;
124 	case DL_DISABMULTI_REQ:
125 		(void) proto_disabmulti_req(dsp, udlp, mp);
126 		break;
127 	case DL_PROMISCON_REQ:
128 		(void) proto_promiscon_req(dsp, udlp, mp);
129 		break;
130 	case DL_PROMISCOFF_REQ:
131 		(void) proto_promiscoff_req(dsp, udlp, mp);
132 		break;
133 	case DL_PHYS_ADDR_REQ:
134 		(void) proto_physaddr_req(dsp, udlp, mp);
135 		break;
136 	case DL_SET_PHYS_ADDR_REQ:
137 		(void) proto_setphysaddr_req(dsp, udlp, mp);
138 		break;
139 	case DL_NOTIFY_REQ:
140 		(void) proto_notify_req(dsp, udlp, mp);
141 		break;
142 	case DL_CAPABILITY_REQ:
143 		(void) proto_capability_req(dsp, udlp, mp);
144 		break;
145 	case DL_PASSIVE_REQ:
146 		(void) proto_passive_req(dsp, udlp, mp);
147 		break;
148 	default:
149 		(void) proto_req(dsp, udlp, mp);
150 		break;
151 	}
152 }
153 
154 /*
155  * Finish any pending operations.
156  * Requests that need to be processed asynchronously will be handled
157  * by a separate thread. After this function returns, other threads
158  * will be allowed to enter dld; they will not be able to do anything
159  * until ds_dlstate transitions to a non-pending state.
160  */
161 void
162 dld_finish_pending_ops(dld_str_t *dsp)
163 {
164 	task_func_t *op = NULL;
165 
166 	ASSERT(MUTEX_HELD(&dsp->ds_thr_lock));
167 	ASSERT(dsp->ds_thr == 0);
168 
169 	op = dsp->ds_pending_op;
170 	dsp->ds_pending_op = NULL;
171 	mutex_exit(&dsp->ds_thr_lock);
172 	if (op != NULL)
173 		(void) taskq_dispatch(system_taskq, op, dsp, TQ_SLEEP);
174 }
175 
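/*
 * A negative dl_sap_length in the DL_INFO_ACK indicates that the SAP follows
 * the physical address within a DLSAP address; NEG() is used to build it.
 */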
176 #define	NEG(x)	-(x)
177 
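/*
 * Worst-case layout used to size the DL_INFO_ACK: the fixed portion followed
 * by the DLSAP address, the broadcast address and the optional QoS structures.
 */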
178 typedef struct dl_info_ack_wrapper {
179 	dl_info_ack_t		dl_info;
180 	uint8_t			dl_addr[MAXADDRLEN + sizeof (uint16_t)];
181 	uint8_t			dl_brdcst_addr[MAXADDRLEN];
182 	dl_qos_cl_range1_t	dl_qos_range1;
183 	dl_qos_cl_sel1_t	dl_qos_sel1;
184 } dl_info_ack_wrapper_t;
185 
186 /*
187  * DL_INFO_REQ
188  */
189 /*ARGSUSED*/
190 static boolean_t
191 proto_info_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
192 {
193 	dl_info_ack_wrapper_t	*dlwp;
194 	dl_info_ack_t		*dlp;
195 	dl_qos_cl_sel1_t	*selp;
196 	dl_qos_cl_range1_t	*rangep;
197 	uint8_t			*addr;
198 	uint8_t			*brdcst_addr;
199 	uint_t			addr_length;
200 	uint_t			sap_length;
201 	mac_info_t		minfo;
202 	mac_info_t		*minfop;
203 	queue_t			*q = dsp->ds_wq;
204 
205 	/*
206 	 * Swap the request message for one large enough to contain the
207 	 * wrapper structure defined above.
208 	 */
209 	if ((mp = mexchange(q, mp, sizeof (dl_info_ack_wrapper_t),
210 	    M_PCPROTO, 0)) == NULL)
211 		return (B_FALSE);
212 
213 	rw_enter(&dsp->ds_lock, RW_READER);
214 
215 	bzero(mp->b_rptr, sizeof (dl_info_ack_wrapper_t));
216 	dlwp = (dl_info_ack_wrapper_t *)mp->b_rptr;
217 
218 	dlp = &(dlwp->dl_info);
219 	ASSERT(dlp == (dl_info_ack_t *)mp->b_rptr);
220 
221 	dlp->dl_primitive = DL_INFO_ACK;
222 
223 	/*
224 	 * Set up the sub-structure pointers.
225 	 */
226 	addr = dlwp->dl_addr;
227 	brdcst_addr = dlwp->dl_brdcst_addr;
228 	rangep = &(dlwp->dl_qos_range1);
229 	selp = &(dlwp->dl_qos_sel1);
230 
231 	/*
232 	 * This driver supports only version 2 connectionless DLPI provider
233 	 * nodes.
234 	 */
235 	dlp->dl_service_mode = DL_CLDLS;
236 	dlp->dl_version = DL_VERSION_2;
237 
238 	/*
239 	 * Set the style of the provider
240 	 */
241 	dlp->dl_provider_style = dsp->ds_style;
242 	ASSERT(dlp->dl_provider_style == DL_STYLE1 ||
243 	    dlp->dl_provider_style == DL_STYLE2);
244 
245 	/*
246 	 * Set the current DLPI state.
247 	 */
248 	dlp->dl_current_state = dsp->ds_dlstate;
249 
250 	/*
251 	 * Gratuitously set the media type. This is to deal with modules
252 	 * that assume the media type is known prior to DL_ATTACH_REQ
253 	 * being completed.
254 	 */
255 	dlp->dl_mac_type = DL_ETHER;
256 
257 	/*
258 	 * If the stream is not at least attached, we try to retrieve the
259 	 * mac_info using mac_info_get().
260 	 */
261 	if (dsp->ds_dlstate == DL_UNATTACHED ||
262 	    dsp->ds_dlstate == DL_ATTACH_PENDING ||
263 	    dsp->ds_dlstate == DL_DETACH_PENDING) {
264 		if (!mac_info_get(ddi_major_to_name(dsp->ds_major), &minfo)) {
265 			/*
266 			 * Cannot find mac_info. giving up.
267 			 */
268 			goto done;
269 		}
270 		minfop = &minfo;
271 	} else {
272 		minfop = (mac_info_t *)dsp->ds_mip;
273 	}
274 
275 	/*
276 	 * Set the media type (properly this time).
277 	 */
278 	dlp->dl_mac_type = minfop->mi_media;
279 
280 	/*
281 	 * Set the DLSAP length. We only support 16 bit values and they
282 	 * appear after the MAC address portion of DLSAP addresses.
283 	 */
284 	sap_length = sizeof (uint16_t);
285 	dlp->dl_sap_length = NEG(sap_length);
286 
287 	/*
288 	 * Set the minimum and maximum payload sizes.
289 	 */
290 	dlp->dl_min_sdu = minfop->mi_sdu_min;
291 	dlp->dl_max_sdu = minfop->mi_sdu_max;
292 
293 	addr_length = minfop->mi_addr_length;
294 	ASSERT(addr_length != 0);
295 
296 	/*
297 	 * Copy in the media broadcast address.
298 	 */
299 	dlp->dl_brdcst_addr_offset = (uintptr_t)brdcst_addr - (uintptr_t)dlp;
300 	bcopy(minfop->mi_brdcst_addr, brdcst_addr, addr_length);
301 	dlp->dl_brdcst_addr_length = addr_length;
302 
303 	/*
304 	 * We only support QoS information for VLAN interfaces.
305 	 */
306 	if (dsp->ds_vid != VLAN_ID_NONE) {
307 		dlp->dl_qos_range_offset = (uintptr_t)rangep - (uintptr_t)dlp;
308 		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
309 
310 		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
311 		rangep->dl_trans_delay.dl_target_value = DL_UNKNOWN;
312 		rangep->dl_trans_delay.dl_accept_value = DL_UNKNOWN;
313 		rangep->dl_protection.dl_min = DL_UNKNOWN;
314 		rangep->dl_protection.dl_max = DL_UNKNOWN;
315 		rangep->dl_residual_error = DL_UNKNOWN;
316 
317 		/*
318 		 * Specify the supported range of priorities.
319 		 */
320 		rangep->dl_priority.dl_min = 0;
321 		rangep->dl_priority.dl_max = (1 << VLAN_PRI_SIZE) - 1;
322 
323 		dlp->dl_qos_offset = (uintptr_t)selp - (uintptr_t)dlp;
324 		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
325 
326 		selp->dl_qos_type = DL_QOS_CL_SEL1;
327 		selp->dl_trans_delay = DL_UNKNOWN;
328 		selp->dl_protection = DL_UNKNOWN;
329 		selp->dl_residual_error = DL_UNKNOWN;
330 
331 		/*
332 		 * Specify the current priority (which can be changed by
333 		 * the DL_UDQOS_REQ primitive).
334 		 */
335 		selp->dl_priority = dsp->ds_pri;
336 	} else {
337 		/*
338 		 * Shorten the buffer to lose the unused QoS information
339 		 * structures.
340 		 */
341 		mp->b_wptr = (uint8_t *)rangep;
342 	}
343 
344 	dlp->dl_addr_length = addr_length + sizeof (uint16_t);
345 	if (dsp->ds_dlstate == DL_IDLE) {
346 		/*
347 		 * The stream is bound. Therefore we can formulate a valid
348 		 * DLSAP address.
349 		 */
350 		dlp->dl_addr_offset = (uintptr_t)addr - (uintptr_t)dlp;
351 		bcopy(dsp->ds_curr_addr, addr, addr_length);
352 		*(uint16_t *)(addr + addr_length) = dsp->ds_sap;
353 	}
354 
355 done:
356 	ASSERT(IMPLY(dlp->dl_qos_offset != 0, dlp->dl_qos_length != 0));
357 	ASSERT(IMPLY(dlp->dl_qos_range_offset != 0,
358 	    dlp->dl_qos_range_length != 0));
359 	ASSERT(IMPLY(dlp->dl_addr_offset != 0, dlp->dl_addr_length != 0));
360 	ASSERT(IMPLY(dlp->dl_brdcst_addr_offset != 0,
361 	    dlp->dl_brdcst_addr_length != 0));
362 
363 	rw_exit(&dsp->ds_lock);
364 
365 	qreply(q, mp);
366 	return (B_TRUE);
367 }
368 
369 /*
370  * DL_ATTACH_REQ
371  */
372 static boolean_t
373 proto_attach_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
374 {
375 	dl_attach_req_t	*dlp = (dl_attach_req_t *)udlp;
376 	int		err = 0;
377 	t_uscalar_t	dl_err;
378 	queue_t		*q = dsp->ds_wq;
379 
380 	rw_enter(&dsp->ds_lock, RW_WRITER);
381 
382 	if (MBLKL(mp) < sizeof (dl_attach_req_t) ||
383 	    dlp->dl_ppa < 0 || dsp->ds_style == DL_STYLE1) {
384 		dl_err = DL_BADPRIM;
385 		goto failed;
386 	}
387 
388 	if (dsp->ds_dlstate != DL_UNATTACHED) {
389 		dl_err = DL_OUTSTATE;
390 		goto failed;
391 	}
392 
393 	dsp->ds_dlstate = DL_ATTACH_PENDING;
394 
395 	err = dld_str_attach(dsp, dlp->dl_ppa);
396 	if (err != 0) {
397 		switch (err) {
398 		case ENOENT:
399 			dl_err = DL_BADPPA;
400 			err = 0;
401 			break;
402 		default:
403 			dl_err = DL_SYSERR;
404 			break;
405 		}
406 		dsp->ds_dlstate = DL_UNATTACHED;
407 		goto failed;
408 	}
409 	ASSERT(dsp->ds_dlstate == DL_UNBOUND);
410 	rw_exit(&dsp->ds_lock);
411 
412 	dlokack(q, mp, DL_ATTACH_REQ);
413 	return (B_TRUE);
414 failed:
415 	rw_exit(&dsp->ds_lock);
416 	dlerrorack(q, mp, DL_ATTACH_REQ, dl_err, (t_uscalar_t)err);
417 	return (B_FALSE);
418 }
419 
420 /*
421  * DL_DETACH_REQ
422  */
423 static void
424 proto_process_detach_req(void *arg)
425 {
426 	dld_str_t	*dsp = arg;
427 	mblk_t		*mp;
428 
429 	/*
430 	 * We don't need to hold locks because no other thread
431 	 * would manipulate dsp while it is in a PENDING state.
432 	 */
433 	ASSERT(dsp->ds_pending_req != NULL);
434 	ASSERT(dsp->ds_dlstate == DL_DETACH_PENDING);
435 
436 	mp = dsp->ds_pending_req;
437 	dsp->ds_pending_req = NULL;
438 	dld_str_detach(dsp);
439 	dlokack(dsp->ds_wq, mp, DL_DETACH_REQ);
440 
441 	DLD_WAKEUP(dsp);
442 }
443 
444 /*ARGSUSED*/
445 static boolean_t
446 proto_detach_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
447 {
448 	queue_t		*q = dsp->ds_wq;
449 	t_uscalar_t	dl_err;
450 
451 	rw_enter(&dsp->ds_lock, RW_WRITER);
452 
453 	if (MBLKL(mp) < sizeof (dl_detach_req_t)) {
454 		dl_err = DL_BADPRIM;
455 		goto failed;
456 	}
457 
458 	if (dsp->ds_dlstate != DL_UNBOUND) {
459 		dl_err = DL_OUTSTATE;
460 		goto failed;
461 	}
462 
463 	if (dsp->ds_style == DL_STYLE1) {
464 		dl_err = DL_BADPRIM;
465 		goto failed;
466 	}
467 
468 	dsp->ds_dlstate = DL_DETACH_PENDING;
469 
470 	/*
471 	 * Complete the detach when the driver is single-threaded.
472 	 */
473 	mutex_enter(&dsp->ds_thr_lock);
474 	ASSERT(dsp->ds_pending_req == NULL);
475 	dsp->ds_pending_req = mp;
476 	dsp->ds_pending_op = proto_process_detach_req;
477 	dsp->ds_pending_cnt++;
478 	mutex_exit(&dsp->ds_thr_lock);
479 	rw_exit(&dsp->ds_lock);
480 
481 	return (B_TRUE);
482 failed:
483 	rw_exit(&dsp->ds_lock);
484 	dlerrorack(q, mp, DL_DETACH_REQ, dl_err, 0);
485 	return (B_FALSE);
486 }
487 
488 /*
489  * DL_BIND_REQ
490  */
491 static boolean_t
492 proto_bind_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
493 {
494 	dl_bind_req_t	*dlp = (dl_bind_req_t *)udlp;
495 	int		err = 0;
496 	uint8_t		addr[MAXADDRLEN];
497 	uint_t		addr_length;
498 	t_uscalar_t	dl_err;
499 	t_scalar_t	sap;
500 	queue_t		*q = dsp->ds_wq;
501 
502 	rw_enter(&dsp->ds_lock, RW_WRITER);
503 
504 	if (MBLKL(mp) < sizeof (dl_bind_req_t)) {
505 		dl_err = DL_BADPRIM;
506 		goto failed;
507 	}
508 
509 	if (dlp->dl_xidtest_flg != 0) {
510 		dl_err = DL_NOAUTO;
511 		goto failed;
512 	}
513 
514 	if (dlp->dl_service_mode != DL_CLDLS) {
515 		dl_err = DL_UNSUPPORTED;
516 		goto failed;
517 	}
518 
519 	if (dsp->ds_dlstate != DL_UNBOUND) {
520 		dl_err = DL_OUTSTATE;
521 		goto failed;
522 	}
523 
524 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
525 	    !dls_active_set(dsp->ds_dc)) {
526 		dl_err = DL_SYSERR;
527 		err = EBUSY;
528 		goto failed;
529 	}
530 
531 	dsp->ds_dlstate = DL_BIND_PENDING;
532 	/*
533 	 * Set the receive callback.
534 	 */
535 	dls_rx_set(dsp->ds_dc, (dsp->ds_mode == DLD_RAW) ?
536 	    dld_str_rx_raw : dld_str_rx_unitdata, dsp);
537 
538 	/*
539 	 * Bind the channel such that it can receive packets.
540 	 */
541 	sap = dsp->ds_sap = dlp->dl_sap;
542 	err = dls_bind(dsp->ds_dc, dlp->dl_sap);
543 	if (err != 0) {
544 		switch (err) {
545 		case EINVAL:
546 			dl_err = DL_BADADDR;
547 			err = 0;
548 			break;
549 		default:
550 			dl_err = DL_SYSERR;
551 			break;
552 		}
553 		dsp->ds_dlstate = DL_UNBOUND;
554 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
555 			dls_active_clear(dsp->ds_dc);
556 
557 		goto failed;
558 	}
559 
560 	/*
561 	 * Copy in MAC address.
562 	 */
563 	addr_length = dsp->ds_mip->mi_addr_length;
564 	bcopy(dsp->ds_curr_addr, addr, addr_length);
565 
566 	/*
567 	 * Copy in the DLSAP.
568 	 */
569 	*(uint16_t *)(addr + addr_length) = dsp->ds_sap;
570 	addr_length += sizeof (uint16_t);
571 
572 	dsp->ds_dlstate = DL_IDLE;
573 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
574 		dsp->ds_passivestate = DLD_ACTIVE;
575 
576 	rw_exit(&dsp->ds_lock);
577 
578 	dlbindack(q, mp, sap, (void *)addr, addr_length, 0, 0);
579 	return (B_TRUE);
580 failed:
581 	rw_exit(&dsp->ds_lock);
582 	dlerrorack(q, mp, DL_BIND_REQ, dl_err, (t_uscalar_t)err);
583 	return (B_FALSE);
584 }
585 
586 /*
587  * DL_UNBIND_REQ
588  */
589 /*ARGSUSED*/
590 static void
591 proto_process_unbind_req(void *arg)
592 {
593 	dld_str_t	*dsp = arg;
594 	mblk_t		*mp;
595 
596 	/*
597 	 * We don't need to hold locks because no other thread
598 	 * would manipulate dsp while it is in a PENDING state.
599 	 */
600 	ASSERT(dsp->ds_pending_req != NULL);
601 	ASSERT(dsp->ds_dlstate == DL_UNBIND_PENDING);
602 
603 	/*
604 	 * Flush any remaining packets scheduled for transmission.
605 	 */
606 	dld_tx_flush(dsp);
607 
608 	/*
609 	 * Unbind the channel to stop packets being received.
610 	 */
611 	dls_unbind(dsp->ds_dc);
612 
613 	/*
614 	 * Disable polling mode, if it is enabled.
615 	 */
616 	proto_poll_disable(dsp);
617 
618 	/*
619 	 * Clear the receive callback.
620 	 */
621 	dls_rx_set(dsp->ds_dc, NULL, NULL);
622 
623 	/*
624 	 * Set the mode back to the default (unitdata).
625 	 */
626 	dsp->ds_mode = DLD_UNITDATA;
627 
628 	/*
629 	 * If soft rings were enabled, the workers
630 	 * should be quiesced. We cannot check for
631 	 * ds_soft_ring flag because
632 	 * proto_soft_ring_disable() called from
633 	 * proto_capability_req() would have reset it.
634 	 */
635 	if (dls_soft_ring_workers(dsp->ds_dc))
636 		dls_soft_ring_disable(dsp->ds_dc);
637 
638 	mp = dsp->ds_pending_req;
639 	dsp->ds_pending_req = NULL;
640 	dsp->ds_dlstate = DL_UNBOUND;
641 	dlokack(dsp->ds_wq, mp, DL_UNBIND_REQ);
642 
643 	DLD_WAKEUP(dsp);
644 }
645 
646 /*ARGSUSED*/
647 static boolean_t
648 proto_unbind_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
649 {
650 	queue_t		*q = dsp->ds_wq;
651 	t_uscalar_t	dl_err;
652 
653 	rw_enter(&dsp->ds_lock, RW_WRITER);
654 
655 	if (MBLKL(mp) < sizeof (dl_unbind_req_t)) {
656 		dl_err = DL_BADPRIM;
657 		goto failed;
658 	}
659 
660 	if (dsp->ds_dlstate != DL_IDLE) {
661 		dl_err = DL_OUTSTATE;
662 		goto failed;
663 	}
664 
665 	dsp->ds_dlstate = DL_UNBIND_PENDING;
666 
667 	mutex_enter(&dsp->ds_thr_lock);
668 	ASSERT(dsp->ds_pending_req == NULL);
669 	dsp->ds_pending_req = mp;
670 	dsp->ds_pending_op = proto_process_unbind_req;
671 	dsp->ds_pending_cnt++;
672 	mutex_exit(&dsp->ds_thr_lock);
673 	rw_exit(&dsp->ds_lock);
674 
675 	return (B_TRUE);
676 failed:
677 	rw_exit(&dsp->ds_lock);
678 	dlerrorack(q, mp, DL_UNBIND_REQ, dl_err, 0);
679 	return (B_FALSE);
680 }
681 
682 /*
683  * DL_PROMISCON_REQ
684  */
685 static boolean_t
686 proto_promiscon_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
687 {
688 	dl_promiscon_req_t *dlp = (dl_promiscon_req_t *)udlp;
689 	int		err = 0;
690 	t_uscalar_t	dl_err;
691 	uint32_t	promisc_saved;
692 	queue_t		*q = dsp->ds_wq;
693 
694 	rw_enter(&dsp->ds_lock, RW_WRITER);
695 
696 	if (MBLKL(mp) < sizeof (dl_promiscon_req_t)) {
697 		dl_err = DL_BADPRIM;
698 		goto failed;
699 	}
700 
701 	if (dsp->ds_dlstate == DL_UNATTACHED ||
702 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
703 		dl_err = DL_OUTSTATE;
704 		goto failed;
705 	}
706 
707 	promisc_saved = dsp->ds_promisc;
708 	switch (dlp->dl_level) {
709 	case DL_PROMISC_SAP:
710 		dsp->ds_promisc |= DLS_PROMISC_SAP;
711 		break;
712 
713 	case DL_PROMISC_MULTI:
714 		dsp->ds_promisc |= DLS_PROMISC_MULTI;
715 		break;
716 
717 	case DL_PROMISC_PHYS:
718 		dsp->ds_promisc |= DLS_PROMISC_PHYS;
719 		break;
720 
721 	default:
722 		dl_err = DL_NOTSUPPORTED;
723 		goto failed;
724 	}
725 
726 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
727 	    !dls_active_set(dsp->ds_dc)) {
728 		dsp->ds_promisc = promisc_saved;
729 		dl_err = DL_SYSERR;
730 		err = EBUSY;
731 		goto failed;
732 	}
733 
734 	/*
735 	 * Adjust channel promiscuity.
736 	 */
737 	err = dls_promisc(dsp->ds_dc, dsp->ds_promisc);
738 	if (err != 0) {
739 		dl_err = DL_SYSERR;
740 		dsp->ds_promisc = promisc_saved;
741 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
742 			dls_active_clear(dsp->ds_dc);
743 
744 		goto failed;
745 	}
746 
747 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
748 		dsp->ds_passivestate = DLD_ACTIVE;
749 
750 	rw_exit(&dsp->ds_lock);
751 	dlokack(q, mp, DL_PROMISCON_REQ);
752 	return (B_TRUE);
753 failed:
754 	rw_exit(&dsp->ds_lock);
755 	dlerrorack(q, mp, DL_PROMISCON_REQ, dl_err, (t_uscalar_t)err);
756 	return (B_FALSE);
757 }
758 
759 /*
760  * DL_PROMISCOFF_REQ
761  */
762 static boolean_t
763 proto_promiscoff_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
764 {
765 	dl_promiscoff_req_t *dlp = (dl_promiscoff_req_t *)udlp;
766 	int		err = 0;
767 	t_uscalar_t	dl_err;
768 	uint32_t	promisc_saved;
769 	queue_t		*q = dsp->ds_wq;
770 
771 	rw_enter(&dsp->ds_lock, RW_WRITER);
772 
773 	if (MBLKL(mp) < sizeof (dl_promiscoff_req_t)) {
774 		dl_err = DL_BADPRIM;
775 		goto failed;
776 	}
778 
779 	if (dsp->ds_dlstate == DL_UNATTACHED ||
780 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
781 		dl_err = DL_OUTSTATE;
782 		goto failed;
783 	}
784 
785 	promisc_saved = dsp->ds_promisc;
786 	switch (dlp->dl_level) {
787 	case DL_PROMISC_SAP:
788 		if (!(dsp->ds_promisc & DLS_PROMISC_SAP)) {
789 			dl_err = DL_NOTENAB;
790 			goto failed;
791 		}
792 		dsp->ds_promisc &= ~DLS_PROMISC_SAP;
793 		break;
794 
795 	case DL_PROMISC_MULTI:
796 		if (!(dsp->ds_promisc & DLS_PROMISC_MULTI)) {
797 			dl_err = DL_NOTENAB;
798 			goto failed;
799 		}
800 		dsp->ds_promisc &= ~DLS_PROMISC_MULTI;
801 		break;
802 
803 	case DL_PROMISC_PHYS:
804 		if (!(dsp->ds_promisc & DLS_PROMISC_PHYS)) {
805 			dl_err = DL_NOTENAB;
806 			goto failed;
807 		}
808 		dsp->ds_promisc &= ~DLS_PROMISC_PHYS;
809 		break;
810 
811 	default:
812 		dl_err = DL_NOTSUPPORTED;
813 		goto failed;
814 	}
815 
816 	/*
817 	 * Adjust channel promiscuity.
818 	 */
819 	err = dls_promisc(dsp->ds_dc, dsp->ds_promisc);
820 	if (err != 0) {
821 		dsp->ds_promisc = promisc_saved;
822 		dl_err = DL_SYSERR;
823 		goto failed;
824 	}
825 
826 	rw_exit(&dsp->ds_lock);
827 	dlokack(q, mp, DL_PROMISCOFF_REQ);
828 	return (B_TRUE);
829 failed:
830 	rw_exit(&dsp->ds_lock);
831 	dlerrorack(q, mp, DL_PROMISCOFF_REQ, dl_err, (t_uscalar_t)err);
832 	return (B_FALSE);
833 }
834 
835 /*
836  * DL_ENABMULTI_REQ
837  */
838 static boolean_t
839 proto_enabmulti_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
840 {
841 	dl_enabmulti_req_t *dlp = (dl_enabmulti_req_t *)udlp;
842 	int		err = 0;
843 	t_uscalar_t	dl_err;
844 	queue_t		*q = dsp->ds_wq;
845 
846 	rw_enter(&dsp->ds_lock, RW_WRITER);
847 
848 	if (dsp->ds_dlstate == DL_UNATTACHED ||
849 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
850 		dl_err = DL_OUTSTATE;
851 		goto failed;
852 	}
853 
854 	if (MBLKL(mp) < sizeof (dl_enabmulti_req_t) ||
855 	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
856 	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
857 		dl_err = DL_BADPRIM;
858 		goto failed;
859 	}
860 
861 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
862 	    !dls_active_set(dsp->ds_dc)) {
863 		dl_err = DL_SYSERR;
864 		err = EBUSY;
865 		goto failed;
866 	}
867 
868 	err = dls_multicst_add(dsp->ds_dc, mp->b_rptr + dlp->dl_addr_offset);
869 	if (err != 0) {
870 		switch (err) {
871 		case EINVAL:
872 			dl_err = DL_BADADDR;
873 			err = 0;
874 			break;
875 		case ENOSPC:
876 			dl_err = DL_TOOMANY;
877 			err = 0;
878 			break;
879 		default:
880 			dl_err = DL_SYSERR;
881 			break;
882 		}
883 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
884 			dls_active_clear(dsp->ds_dc);
885 
886 		goto failed;
887 	}
888 
889 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
890 		dsp->ds_passivestate = DLD_ACTIVE;
891 
892 	rw_exit(&dsp->ds_lock);
893 	dlokack(q, mp, DL_ENABMULTI_REQ);
894 	return (B_TRUE);
895 failed:
896 	rw_exit(&dsp->ds_lock);
897 	dlerrorack(q, mp, DL_ENABMULTI_REQ, dl_err, (t_uscalar_t)err);
898 	return (B_FALSE);
899 }
900 
901 /*
902  * DL_DISABMULTI_REQ
903  */
904 static boolean_t
905 proto_disabmulti_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
906 {
907 	dl_disabmulti_req_t *dlp = (dl_disabmulti_req_t *)udlp;
908 	int		err = 0;
909 	t_uscalar_t	dl_err;
910 	queue_t		*q = dsp->ds_wq;
911 
912 	rw_enter(&dsp->ds_lock, RW_READER);
913 
914 	if (dsp->ds_dlstate == DL_UNATTACHED ||
915 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
916 		dl_err = DL_OUTSTATE;
917 		goto failed;
918 	}
919 
920 	if (MBLKL(mp) < sizeof (dl_disabmulti_req_t) ||
921 	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
922 	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
923 		dl_err = DL_BADPRIM;
924 		goto failed;
925 	}
926 
927 	err = dls_multicst_remove(dsp->ds_dc, mp->b_rptr + dlp->dl_addr_offset);
928 	if (err != 0) {
929 		switch (err) {
930 		case EINVAL:
931 			dl_err = DL_BADADDR;
932 			err = 0;
933 			break;
934 
935 		case ENOENT:
936 			dl_err = DL_NOTENAB;
937 			err = 0;
938 			break;
939 
940 		default:
941 			dl_err = DL_SYSERR;
942 			break;
943 		}
944 		goto failed;
945 	}
946 
947 	rw_exit(&dsp->ds_lock);
948 	dlokack(q, mp, DL_DISABMULTI_REQ);
949 	return (B_TRUE);
950 failed:
951 	rw_exit(&dsp->ds_lock);
952 	dlerrorack(q, mp, DL_DISABMULTI_REQ, dl_err, (t_uscalar_t)err);
953 	return (B_FALSE);
954 }
955 
956 /*
957  * DL_PHYS_ADDR_REQ
958  */
959 static boolean_t
960 proto_physaddr_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
961 {
962 	dl_phys_addr_req_t *dlp = (dl_phys_addr_req_t *)udlp;
963 	queue_t		*q = dsp->ds_wq;
964 	t_uscalar_t	dl_err;
965 	char		*addr;
966 	uint_t		addr_length;
967 
968 	rw_enter(&dsp->ds_lock, RW_READER);
969 
970 	if (MBLKL(mp) < sizeof (dl_phys_addr_req_t)) {
971 		dl_err = DL_BADPRIM;
972 		goto failed;
973 	}
974 
975 	if (dsp->ds_dlstate == DL_UNATTACHED ||
976 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
977 		dl_err = DL_OUTSTATE;
978 		goto failed;
979 	}
980 
981 	if (dlp->dl_addr_type != DL_CURR_PHYS_ADDR &&
982 	    dlp->dl_addr_type != DL_FACT_PHYS_ADDR) {
983 		dl_err = DL_UNSUPPORTED;
984 		goto failed;
985 	}
986 
987 	addr_length = dsp->ds_mip->mi_addr_length;
988 	addr = kmem_alloc(addr_length, KM_NOSLEEP);
989 	if (addr == NULL) {
990 		rw_exit(&dsp->ds_lock);
991 		merror(q, mp, ENOSR);
992 		return (B_FALSE);
993 	}
994 
995 	/*
996 	 * Copy out the address before we drop the lock; we don't
997 	 * want to call dlphysaddrack() while holding ds_lock.
998 	 */
999 	bcopy((dlp->dl_addr_type == DL_CURR_PHYS_ADDR) ?
1000 	    dsp->ds_curr_addr : dsp->ds_fact_addr, addr, addr_length);
1001 
1002 	rw_exit(&dsp->ds_lock);
1003 	dlphysaddrack(q, mp, addr, (t_uscalar_t)addr_length);
1004 	kmem_free(addr, addr_length);
1005 	return (B_TRUE);
1006 failed:
1007 	rw_exit(&dsp->ds_lock);
1008 	dlerrorack(q, mp, DL_PHYS_ADDR_REQ, dl_err, 0);
1009 	return (B_FALSE);
1010 }
1011 
1012 /*
1013  * DL_SET_PHYS_ADDR_REQ
1014  */
1015 static boolean_t
1016 proto_setphysaddr_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1017 {
1018 	dl_set_phys_addr_req_t *dlp = (dl_set_phys_addr_req_t *)udlp;
1019 	int		err = 0;
1020 	t_uscalar_t	dl_err;
1021 	queue_t		*q = dsp->ds_wq;
1022 
1023 	rw_enter(&dsp->ds_lock, RW_WRITER);
1024 
1025 	if (dsp->ds_dlstate == DL_UNATTACHED ||
1026 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
1027 		dl_err = DL_OUTSTATE;
1028 		goto failed;
1029 	}
1030 
1031 	if (MBLKL(mp) < sizeof (dl_set_phys_addr_req_t) ||
1032 	    !MBLKIN(mp, dlp->dl_addr_offset, dlp->dl_addr_length) ||
1033 	    dlp->dl_addr_length != dsp->ds_mip->mi_addr_length) {
1034 		dl_err = DL_BADPRIM;
1035 		goto failed;
1036 	}
1037 
1038 	if (dsp->ds_passivestate == DLD_UNINITIALIZED &&
1039 	    !dls_active_set(dsp->ds_dc)) {
1040 		dl_err = DL_SYSERR;
1041 		err = EBUSY;
1042 		goto failed;
1043 	}
1044 
1045 	err = mac_unicst_set(dsp->ds_mh, mp->b_rptr + dlp->dl_addr_offset);
1046 	if (err != 0) {
1047 		switch (err) {
1048 		case EINVAL:
1049 			dl_err = DL_BADADDR;
1050 			err = 0;
1051 			break;
1052 
1053 		default:
1054 			dl_err = DL_SYSERR;
1055 			break;
1056 		}
1057 		if (dsp->ds_passivestate == DLD_UNINITIALIZED)
1058 			dls_active_clear(dsp->ds_dc);
1059 
1060 		goto failed;
1061 	}
1062 	if (dsp->ds_passivestate == DLD_UNINITIALIZED)
1063 		dsp->ds_passivestate = DLD_ACTIVE;
1064 
1065 	rw_exit(&dsp->ds_lock);
1066 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
1067 	return (B_TRUE);
1068 failed:
1069 	rw_exit(&dsp->ds_lock);
1070 	dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, dl_err, (t_uscalar_t)err);
1071 	return (B_FALSE);
1072 }
1073 
1074 /*
1075  * DL_UDQOS_REQ
1076  */
1077 static boolean_t
1078 proto_udqos_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1079 {
1080 	dl_udqos_req_t *dlp = (dl_udqos_req_t *)udlp;
1081 	dl_qos_cl_sel1_t *selp;
1082 	int		off, len;
1083 	t_uscalar_t	dl_err;
1084 	queue_t		*q = dsp->ds_wq;
1085 
1086 	off = dlp->dl_qos_offset;
1087 	len = dlp->dl_qos_length;
1088 
1089 	rw_enter(&dsp->ds_lock, RW_WRITER);
1090 
1091 	if (MBLKL(mp) < sizeof (dl_udqos_req_t) || !MBLKIN(mp, off, len)) {
1092 		dl_err = DL_BADPRIM;
1093 		goto failed;
1094 	}
1095 
1096 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
1097 	if (selp->dl_qos_type != DL_QOS_CL_SEL1) {
1098 		dl_err = DL_BADQOSTYPE;
1099 		goto failed;
1100 	}
1101 
1102 	if (dsp->ds_vid == VLAN_ID_NONE ||
1103 	    selp->dl_priority > (1 << VLAN_PRI_SIZE) - 1 ||
1104 	    selp->dl_priority < 0) {
1105 		dl_err = DL_BADQOSPARAM;
1106 		goto failed;
1107 	}
1108 
1109 	dsp->ds_pri = selp->dl_priority;
1110 
1111 	rw_exit(&dsp->ds_lock);
1112 	dlokack(q, mp, DL_UDQOS_REQ);
1113 	return (B_TRUE);
1114 failed:
1115 	rw_exit(&dsp->ds_lock);
1116 	dlerrorack(q, mp, DL_UDQOS_REQ, dl_err, 0);
1117 	return (B_FALSE);
1118 }
1119 
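/*
 * Return B_TRUE if the module directly above this stream is IP.  Polling and
 * soft ring fanout are only enabled when IP is the immediate upper module.
 */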
1120 static boolean_t
1121 check_ip_above(queue_t *q)
1122 {
1123 	queue_t		*next_q;
1124 	boolean_t	ret = B_TRUE;
1125 
1126 	claimstr(q);
1127 	next_q = q->q_next;
1128 	if (strcmp(next_q->q_qinfo->qi_minfo->mi_idname, "ip") != 0)
1129 		ret = B_FALSE;
1130 	releasestr(q);
1131 	return (ret);
1132 }
1133 
1134 /*
1135  * DL_CAPABILITY_REQ
1136  */
1137 /*ARGSUSED*/
1138 static boolean_t
1139 proto_capability_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1140 {
1141 	dl_capability_req_t *dlp = (dl_capability_req_t *)udlp;
1142 	dl_capability_sub_t *sp;
1143 	size_t		size, len;
1144 	offset_t	off, end;
1145 	t_uscalar_t	dl_err;
1146 	queue_t		*q = dsp->ds_wq;
1147 	boolean_t	upgraded;
1148 
1149 	rw_enter(&dsp->ds_lock, RW_READER);
1150 
1151 	if (MBLKL(mp) < sizeof (dl_capability_req_t)) {
1152 		dl_err = DL_BADPRIM;
1153 		goto failed;
1154 	}
1155 
1156 	if (dsp->ds_dlstate == DL_UNATTACHED ||
1157 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
1158 		dl_err = DL_OUTSTATE;
1159 		goto failed;
1160 	}
1161 
1162 	/*
1163 	 * This request is overloaded. If there are no requested capabilities
1164 	 * then we just want to acknowledge with all the capabilities we
1165 	 * support. Otherwise we enable the set of capabilities requested.
1166 	 */
1167 	if (dlp->dl_sub_length == 0) {
1168 		/* callee drops lock */
1169 		return (proto_capability_advertise(dsp, mp));
1170 	}
1171 
1172 	if (!MBLKIN(mp, dlp->dl_sub_offset, dlp->dl_sub_length)) {
1173 		dl_err = DL_BADPRIM;
1174 		goto failed;
1175 	}
1176 
1177 	dlp->dl_primitive = DL_CAPABILITY_ACK;
1178 
1179 	off = dlp->dl_sub_offset;
1180 	len = dlp->dl_sub_length;
1181 
1182 	/*
1183 	 * Walk the list of capabilities to be enabled.
1184 	 */
1185 	upgraded = B_FALSE;
1186 	for (end = off + len; off < end; ) {
1187 		sp = (dl_capability_sub_t *)(mp->b_rptr + off);
1188 		size = sizeof (dl_capability_sub_t) + sp->dl_length;
1189 
1190 		if (off + size > end ||
1191 		    !IS_P2ALIGNED(off, sizeof (uint32_t))) {
1192 			dl_err = DL_BADPRIM;
1193 			goto failed;
1194 		}
1195 
1196 		switch (sp->dl_cap) {
1197 		/*
1198 		 * TCP/IP checksum offload to hardware.
1199 		 */
1200 		case DL_CAPAB_HCKSUM: {
1201 			dl_capab_hcksum_t *hcksump;
1202 			dl_capab_hcksum_t hcksum;
1203 
1204 			ASSERT(dsp->ds_mip->mi_cksum != 0);
1205 
1206 			hcksump = (dl_capab_hcksum_t *)&sp[1];
1207 			/*
1208 			 * Copy for alignment.
1209 			 */
1210 			bcopy(hcksump, &hcksum, sizeof (dl_capab_hcksum_t));
1211 			dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
1212 			bcopy(&hcksum, hcksump, sizeof (dl_capab_hcksum_t));
1213 			break;
1214 		}
1215 
1216 		/*
1217 		 * IP polling interface.
1218 		 */
1219 		case DL_CAPAB_POLL: {
1220 			dl_capab_dls_t *pollp;
1221 			dl_capab_dls_t	poll;
1222 
1223 			pollp = (dl_capab_dls_t *)&sp[1];
1224 			/*
1225 			 * Copy for alignment.
1226 			 */
1227 			bcopy(pollp, &poll, sizeof (dl_capab_dls_t));
1228 
1229 			/*
1230 			 * We need to become writer before enabling and/or
1231 			 * disabling the polling interface.  If we couldn't
1232 			 * upgrade, check state again after re-acquiring the
1233 			 * lock to make sure we can proceed.
1234 			 */
1235 			if (!upgraded && !rw_tryupgrade(&dsp->ds_lock)) {
1236 				rw_exit(&dsp->ds_lock);
1237 				rw_enter(&dsp->ds_lock, RW_WRITER);
1238 
1239 				if (dsp->ds_dlstate == DL_UNATTACHED ||
1240 				    DL_ACK_PENDING(dsp->ds_dlstate)) {
1241 					dl_err = DL_OUTSTATE;
1242 					goto failed;
1243 				}
1244 			}
1245 			upgraded = B_TRUE;
1246 
1247 			switch (poll.dls_flags) {
1248 			default:
1249 				/*FALLTHRU*/
1250 			case POLL_DISABLE:
1251 				proto_poll_disable(dsp);
1252 				break;
1253 
1254 			case POLL_ENABLE:
1255 				ASSERT(!(dld_opt & DLD_OPT_NO_POLL));
1256 
1257 				/*
1258 				 * Make sure polling is disabled.
1259 				 */
1260 				proto_poll_disable(dsp);
1261 
1262 				/*
1263 				 * Now attempt to enable it.
1264 				 */
1265 				if (check_ip_above(dsp->ds_rq) &&
1266 				    proto_poll_enable(dsp, &poll)) {
1267 					bzero(&poll, sizeof (dl_capab_dls_t));
1268 					poll.dls_flags = POLL_ENABLE;
1269 				}
1270 				break;
1271 			}
1272 
1273 			dlcapabsetqid(&(poll.dls_mid), dsp->ds_rq);
1274 			bcopy(&poll, pollp, sizeof (dl_capab_dls_t));
1275 			break;
1276 		}
1277 		case DL_CAPAB_SOFT_RING: {
1278 			dl_capab_dls_t *soft_ringp;
1279 			dl_capab_dls_t soft_ring;
1280 
1281 			soft_ringp = (dl_capab_dls_t *)&sp[1];
1282 			/*
1283 			 * Copy for alignment.
1284 			 */
1285 			bcopy(soft_ringp, &soft_ring,
1286 			    sizeof (dl_capab_dls_t));
1287 
1288 			/*
1289 			 * We need to become writer before enabling and/or
1290 			 * disabling the soft_ring interface.  If we couldn't
1291 			 * upgrade, check state again after re-acquiring the
1292 			 * lock to make sure we can proceed.
1293 			 */
1294 			if (!upgraded && !rw_tryupgrade(&dsp->ds_lock)) {
1295 				rw_exit(&dsp->ds_lock);
1296 				rw_enter(&dsp->ds_lock, RW_WRITER);
1297 
1298 				if (dsp->ds_dlstate == DL_UNATTACHED ||
1299 				    DL_ACK_PENDING(dsp->ds_dlstate)) {
1300 					dl_err = DL_OUTSTATE;
1301 					goto failed;
1302 				}
1303 			}
1304 			upgraded = B_TRUE;
1305 
1306 			switch (soft_ring.dls_flags) {
1307 			default:
1308 				/*FALLTHRU*/
1309 			case SOFT_RING_DISABLE:
1310 				proto_soft_ring_disable(dsp);
1311 				break;
1312 
1313 			case SOFT_RING_ENABLE:
1314 				/*
1315 				 * Make sure soft_ring is disabled.
1316 				 */
1317 				proto_soft_ring_disable(dsp);
1318 
1319 				/*
1320 				 * Now attempt to enable it.
1321 				 */
1322 				if (check_ip_above(dsp->ds_rq) &&
1323 				    proto_soft_ring_enable(dsp, &soft_ring)) {
1324 					bzero(&soft_ring,
1325 					    sizeof (dl_capab_dls_t));
1326 					soft_ring.dls_flags =
1327 					    SOFT_RING_ENABLE;
1328 				} else {
1329 					bzero(&soft_ring,
1330 					    sizeof (dl_capab_dls_t));
1331 					soft_ring.dls_flags =
1332 					    SOFT_RING_DISABLE;
1333 				}
1334 				break;
1335 			}
1336 
1337 			dlcapabsetqid(&(soft_ring.dls_mid), dsp->ds_rq);
1338 			bcopy(&soft_ring, soft_ringp,
1339 			    sizeof (dl_capab_dls_t));
1340 			break;
1341 		}
1342 		default:
1343 			break;
1344 		}
1345 
1346 		off += size;
1347 	}
1348 	rw_exit(&dsp->ds_lock);
1349 	qreply(q, mp);
1350 	return (B_TRUE);
1351 failed:
1352 	rw_exit(&dsp->ds_lock);
1353 	dlerrorack(q, mp, DL_CAPABILITY_REQ, dl_err, 0);
1354 	return (B_FALSE);
1355 }
1356 
1357 /*
1358  * DL_NOTIFY_REQ
1359  */
1360 static boolean_t
1361 proto_notify_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1362 {
1363 	dl_notify_req_t	*dlp = (dl_notify_req_t *)udlp;
1364 	t_uscalar_t	dl_err;
1365 	queue_t		*q = dsp->ds_wq;
1366 	uint_t		note =
1367 	    DL_NOTE_PROMISC_ON_PHYS |
1368 	    DL_NOTE_PROMISC_OFF_PHYS |
1369 	    DL_NOTE_PHYS_ADDR |
1370 	    DL_NOTE_LINK_UP |
1371 	    DL_NOTE_LINK_DOWN |
1372 	    DL_NOTE_CAPAB_RENEG;
1373 
1374 	rw_enter(&dsp->ds_lock, RW_WRITER);
1375 
1376 	if (MBLKL(mp) < sizeof (dl_notify_req_t)) {
1377 		dl_err = DL_BADPRIM;
1378 		goto failed;
1379 	}
1380 	if (dsp->ds_dlstate == DL_UNATTACHED ||
1381 	    DL_ACK_PENDING(dsp->ds_dlstate)) {
1382 		dl_err = DL_OUTSTATE;
1383 		goto failed;
1384 	}
1385 
1386 	if (dsp->ds_mip->mi_stat[MAC_STAT_IFSPEED])
1387 		note |= DL_NOTE_SPEED;
1388 
1389 	/*
1390 	 * Cache the notifications that are being enabled.
1391 	 */
1392 	dsp->ds_notifications = dlp->dl_notifications & note;
1393 	rw_exit(&dsp->ds_lock);
1394 	/*
1395 	 * The ACK carries all notifications regardless of which set is
1396 	 * being enabled.
1397 	 */
1398 	dlnotifyack(q, mp, note);
1399 
1400 	/*
1401 	 * Solicit DL_NOTIFY_IND messages for each enabled notification.
1402 	 */
1403 	rw_enter(&dsp->ds_lock, RW_READER);
1404 	if (dsp->ds_notifications != 0) {
1405 		rw_exit(&dsp->ds_lock);
1406 		dld_str_notify_ind(dsp);
1407 	} else {
1408 		rw_exit(&dsp->ds_lock);
1409 	}
1410 	return (B_TRUE);
1411 failed:
1412 	rw_exit(&dsp->ds_lock);
1413 	dlerrorack(q, mp, DL_NOTIFY_REQ, dl_err, 0);
1414 	return (B_FALSE);
1415 }
1416 
1417 /*
1418  * DL_UNITDATA_REQ
1419  */
1420 static boolean_t
1421 proto_unitdata_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1422 {
1423 	queue_t			*q = dsp->ds_wq;
1424 	dl_unitdata_req_t	*dlp = (dl_unitdata_req_t *)udlp;
1425 	off_t			off;
1426 	size_t			len, size;
1427 	const uint8_t		*addr;
1428 	uint16_t		sap;
1429 	uint_t			addr_length;
1430 	mblk_t			*bp, *cont;
1431 	uint32_t		start, stuff, end, value, flags;
1432 	t_uscalar_t		dl_err;
1433 
1434 	rw_enter(&dsp->ds_lock, RW_READER);
1435 
1436 	if (MBLKL(mp) < sizeof (dl_unitdata_req_t) || mp->b_cont == NULL) {
1437 		dl_err = DL_BADPRIM;
1438 		goto failed;
1439 	}
1440 
1441 	if (dsp->ds_dlstate != DL_IDLE) {
1442 		dl_err = DL_OUTSTATE;
1443 		goto failed;
1444 	}
1445 	addr_length = dsp->ds_mip->mi_addr_length;
1446 
1447 	off = dlp->dl_dest_addr_offset;
1448 	len = dlp->dl_dest_addr_length;
1449 
1450 	if (!MBLKIN(mp, off, len) || !IS_P2ALIGNED(off, sizeof (uint16_t))) {
1451 		dl_err = DL_BADPRIM;
1452 		goto failed;
1453 	}
1454 
1455 	if (len != addr_length + sizeof (uint16_t)) {
1456 		dl_err = DL_BADADDR;
1457 		goto failed;
1458 	}
1459 
1460 	addr = mp->b_rptr + off;
1461 	sap = *(uint16_t *)(mp->b_rptr + off + addr_length);
1462 
1463 	/*
1464 	 * Check the length of the packet and the block types.
1465 	 */
1466 	size = 0;
1467 	cont = mp->b_cont;
1468 	for (bp = cont; bp != NULL; bp = bp->b_cont) {
1469 		if (DB_TYPE(bp) != M_DATA)
1470 			goto baddata;
1471 
1472 		size += MBLKL(bp);
1473 	}
1474 
1475 	if (size > dsp->ds_mip->mi_sdu_max)
1476 		goto baddata;
1477 
1478 	/*
1479 	 * Build a packet header.
1480 	 */
1481 	if ((bp = dls_header(dsp->ds_dc, addr, sap, dsp->ds_pri)) == NULL) {
1482 		dl_err = DL_BADADDR;
1483 		goto failed;
1484 	}
1485 
1486 	/*
1487 	 * We no longer need the M_PROTO header, so free it.
1488 	 */
1489 	freeb(mp);
1490 
1491 	/*
1492 	 * Transfer the checksum offload information if it is present.
1493 	 */
1494 	hcksum_retrieve(cont, NULL, NULL, &start, &stuff, &end, &value,
1495 	    &flags);
1496 	(void) hcksum_assoc(bp, NULL, NULL, start, stuff, end, value, flags,
1497 	    0);
1498 
1499 	/*
1500 	 * Link the payload onto the new header.
1501 	 */
1502 	ASSERT(bp->b_cont == NULL);
1503 	bp->b_cont = cont;
1504 
1505 	str_mdata_fastpath_put(dsp, bp);
1506 	rw_exit(&dsp->ds_lock);
1507 	return (B_TRUE);
1508 failed:
1509 	rw_exit(&dsp->ds_lock);
1510 	dlerrorack(q, mp, DL_UNITDATA_REQ, dl_err, 0);
1511 	return (B_FALSE);
1512 
1513 baddata:
1514 	rw_exit(&dsp->ds_lock);
1515 	dluderrorind(q, mp, (void *)addr, len, DL_BADDATA, 0);
1516 	return (B_FALSE);
1517 }
1518 
1519 /*
1520  * DL_PASSIVE_REQ
1521  */
1522 /* ARGSUSED */
1523 static boolean_t
1524 proto_passive_req(dld_str_t *dsp, union DL_primitives *udlp, mblk_t *mp)
1525 {
1526 	t_uscalar_t dl_err;
1527 
1528 	rw_enter(&dsp->ds_lock, RW_WRITER);
1529 	/*
1530 	 * If we've already become active by issuing an active primitive,
1531 	 * then it's too late to try to become passive.
1532 	 */
1533 	if (dsp->ds_passivestate == DLD_ACTIVE) {
1534 		dl_err = DL_OUTSTATE;
1535 		goto failed;
1536 	}
1537 
1538 	if (MBLKL(mp) < sizeof (dl_passive_req_t)) {
1539 		dl_err = DL_BADPRIM;
1540 		goto failed;
1541 	}
1542 
1543 	dsp->ds_passivestate = DLD_PASSIVE;
1544 	rw_exit(&dsp->ds_lock);
1545 	dlokack(dsp->ds_wq, mp, DL_PASSIVE_REQ);
1546 	return (B_TRUE);
1547 failed:
1548 	rw_exit(&dsp->ds_lock);
1549 	dlerrorack(dsp->ds_wq, mp, DL_PASSIVE_REQ, dl_err, 0);
1550 	return (B_FALSE);
1551 }
1552 
1553 
1554 /*
1555  * Catch-all handler.
1556  */
1557 static boolean_t
1558 proto_req(dld_str_t *dsp, union DL_primitives *dlp, mblk_t *mp)
1559 {
1560 	dlerrorack(dsp->ds_wq, mp, dlp->dl_primitive, DL_UNSUPPORTED, 0);
1561 	return (B_FALSE);
1562 }
1563 
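/*
 * Disable the rx polling capability: reset the MAC resource registration
 * callback and restore the default receive function.
 */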
1564 static void
1565 proto_poll_disable(dld_str_t *dsp)
1566 {
1567 	mac_handle_t	mh;
1568 
1569 	ASSERT(dsp->ds_pending_req != NULL || RW_WRITE_HELD(&dsp->ds_lock));
1570 
1571 	if (!dsp->ds_polling)
1572 		return;
1573 
1574 	/*
1575 	 * It should be impossible to enable raw mode if polling is turned on.
1576 	 */
1577 	ASSERT(dsp->ds_mode != DLD_RAW);
1578 
1579 	/*
1580 	 * Reset the resource_add callback.
1581 	 */
1582 	mh = dls_mac(dsp->ds_dc);
1583 	mac_resource_set(mh, NULL, NULL);
1584 	mac_resources(mh);
1585 
1586 	/*
1587 	 * Set receive function back to default.
1588 	 */
1589 	dls_rx_set(dsp->ds_dc, (dsp->ds_mode == DLD_FASTPATH) ?
1590 	    dld_str_rx_fastpath : dld_str_rx_unitdata, (void *)dsp);
1591 
1592 	/*
1593 	 * Note that polling is disabled.
1594 	 */
1595 	dsp->ds_polling = B_FALSE;
1596 }
1597 
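/*
 * Enable the rx polling capability using the ring-add and receive entry
 * points supplied by the module above in the DL_CAPAB_POLL negotiation.
 */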
1598 static boolean_t
1599 proto_poll_enable(dld_str_t *dsp, dl_capab_dls_t *pollp)
1600 {
1601 	mac_handle_t	mh;
1602 
1603 	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
1604 	ASSERT(!dsp->ds_polling);
1605 
1606 	/*
1607 	 * We cannot enable polling if raw mode
1608 	 * has been enabled.
1609 	 */
1610 	if (dsp->ds_mode == DLD_RAW)
1611 		return (B_FALSE);
1612 
1613 	mh = dls_mac(dsp->ds_dc);
1614 
1615 	/*
1616 	 * Register resources.
1617 	 */
1618 	mac_resource_set(mh, (mac_resource_add_t)pollp->dls_ring_add,
1619 	    (void *)pollp->dls_rx_handle);
1620 	mac_resources(mh);
1621 
1622 	/*
1623 	 * Set the receive function.
1624 	 */
1625 	dls_rx_set(dsp->ds_dc, (dls_rx_t)pollp->dls_rx,
1626 	    (void *)pollp->dls_rx_handle);
1627 
1628 	/*
1629 	 * Note that polling is enabled. This prevents further DLIOCHDRINFO
1630 	 * ioctls from overwriting the receive function pointer.
1631 	 */
1632 	dsp->ds_polling = B_TRUE;
1633 	return (B_TRUE);
1634 }
1635 
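/*
 * Disable soft ring fanout and revert to the default receive function.
 */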
1636 static void
1637 proto_soft_ring_disable(dld_str_t *dsp)
1638 {
1639 	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
1640 
1641 	if (!dsp->ds_soft_ring)
1642 		return;
1643 
1644 	/*
1645 	 * It should be impossible to enable raw mode if soft_ring is turned on.
1646 	 */
1647 	ASSERT(dsp->ds_mode != DLD_RAW);
1648 	proto_change_soft_ring_fanout(dsp, SOFT_RING_NONE);
1649 	/*
1650 	 * Note that fanout is disabled.
1651 	 */
1652 	dsp->ds_soft_ring = B_FALSE;
1653 }
1654 
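/*
 * Enable soft ring fanout using the parameters negotiated via
 * DL_CAPAB_SOFT_RING; this is refused while the stream is in raw mode.
 */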
1655 static boolean_t
1656 proto_soft_ring_enable(dld_str_t *dsp, dl_capab_dls_t *soft_ringp)
1657 {
1658 	ASSERT(RW_WRITE_HELD(&dsp->ds_lock));
1659 	ASSERT(!dsp->ds_soft_ring);
1660 
1661 	/*
1662 	 * We cannot enable soft_ring if raw mode
1663 	 * has been enabled.
1664 	 */
1665 	if (dsp->ds_mode == DLD_RAW)
1666 		return (B_FALSE);
1667 
1668 	if (dls_soft_ring_enable(dsp->ds_dc, soft_ringp) == B_FALSE)
1669 		return (B_FALSE);
1670 
1671 	dsp->ds_soft_ring = B_TRUE;
1672 	return (B_TRUE);
1673 }
1674 
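/*
 * Switch the receive function used by the soft rings between the default
 * fastpath/unitdata entry point (SOFT_RING_NONE) and the soft ring fanout
 * routine.
 */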
1675 static void
1676 proto_change_soft_ring_fanout(dld_str_t *dsp, int type)
1677 {
1678 	dls_rx_t	rx;
1679 
1680 	if (type == SOFT_RING_NONE) {
1681 		rx = (dsp->ds_mode == DLD_FASTPATH) ?
1682 		    dld_str_rx_fastpath : dld_str_rx_unitdata;
1683 	} else {
1684 		rx = (dls_rx_t)dls_ether_soft_ring_fanout;
1685 	}
1686 	dls_soft_ring_rx_set(dsp->ds_dc, rx, dsp, type);
1687 }
1688 
1689 /*
1690  * DL_CAPABILITY_ACK/DL_ERROR_ACK
1691  */
1692 static boolean_t
1693 proto_capability_advertise(dld_str_t *dsp, mblk_t *mp)
1694 {
1695 	dl_capability_ack_t	*dlap;
1696 	dl_capability_sub_t	*dlsp;
1697 	size_t			subsize;
1698 	dl_capab_dls_t		poll;
1699 	dl_capab_dls_t	soft_ring;
1700 	dl_capab_hcksum_t	hcksum;
1701 	dl_capab_zerocopy_t	zcopy;
1702 	uint8_t			*ptr;
1703 	uint32_t		cksum;
1704 	boolean_t		poll_cap;
1705 	queue_t			*q = dsp->ds_wq;
1706 	mblk_t			*mp1;
1707 
1708 	ASSERT(RW_READ_HELD(&dsp->ds_lock));
1709 
1710 	/*
1711 	 * Initially assume no capabilities.
1712 	 */
1713 	subsize = 0;
1714 
1715 	/* Always advertise the soft ring capability for GLDv3 drivers */
1716 	subsize += sizeof (dl_capability_sub_t) + sizeof (dl_capab_dls_t);
1717 
1718 	/*
1719 	 * Check if polling can be enabled on this interface.
1720 	 * If advertising DL_CAPAB_POLL has not been explicitly disabled
1721 	 * then reserve space for that capability.
1722 	 */
1723 	poll_cap = ((dsp->ds_mip->mi_poll & DL_CAPAB_POLL) &&
1724 	    !(dld_opt & DLD_OPT_NO_POLL) && (dsp->ds_vid == VLAN_ID_NONE));
1725 	if (poll_cap) {
1726 		subsize += sizeof (dl_capability_sub_t) +
1727 		    sizeof (dl_capab_dls_t);
1728 	}
1729 
1730 	/*
1731 	 * If the MAC interface supports checksum offload then reserve
1732 	 * space for the DL_CAPAB_HCKSUM capability.
1733 	 */
1734 	if ((cksum = dsp->ds_mip->mi_cksum) != 0) {
1735 		subsize += sizeof (dl_capability_sub_t) +
1736 		    sizeof (dl_capab_hcksum_t);
1737 	}
1738 
1739 	/*
1740 	 * If DL_CAPAB_ZEROCOPY has not been explicitly disabled, then
1741 	 * reserve space for it.
1742 	 */
1743 	if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
1744 		subsize += sizeof (dl_capability_sub_t) +
1745 		    sizeof (dl_capab_zerocopy_t);
1746 	}
1747 
1748 	/*
1749 	 * If we cannot reallocate the message to hold the full
1750 	 * response, send a DL_ERROR_ACK.
1751 	 */
1752 	if ((mp1 = reallocb(mp,
1753 	    sizeof (dl_capability_ack_t) + subsize, 0)) == NULL) {
1754 		rw_exit(&dsp->ds_lock);
1755 		dlerrorack(q, mp, DL_CAPABILITY_REQ, DL_NOTSUPPORTED, 0);
1756 		return (B_FALSE);
1757 	}
1758 
1759 	mp = mp1;
1760 	DB_TYPE(mp) = M_PROTO;
1761 	mp->b_wptr = mp->b_rptr + sizeof (dl_capability_ack_t) + subsize;
1762 	bzero(mp->b_rptr, MBLKL(mp));
1763 	dlap = (dl_capability_ack_t *)mp->b_rptr;
1764 	dlap->dl_primitive = DL_CAPABILITY_ACK;
1765 	dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
1766 	dlap->dl_sub_length = subsize;
1767 	ptr = (uint8_t *)&dlap[1];
1768 
1769 	/*
1770 	 * IP polling interface.
1771 	 */
1772 	if (poll_cap) {
1773 		/*
1774 		 * Attempt to disable just in case this is a re-negotiation;
1775 		 * we need to become writer before doing so.
1776 		 */
1777 		if (!rw_tryupgrade(&dsp->ds_lock)) {
1778 			rw_exit(&dsp->ds_lock);
1779 			rw_enter(&dsp->ds_lock, RW_WRITER);
1780 		}
1781 
1782 		/*
1783 		 * Check if polling state has changed after we re-acquired
1784 		 * the lock above, so that we don't mis-advertise it.
1785 		 */
1786 		poll_cap = ((dsp->ds_mip->mi_poll & DL_CAPAB_POLL) &&
1787 		    !(dld_opt & DLD_OPT_NO_POLL) &&
1788 		    (dsp->ds_vid == VLAN_ID_NONE));
1789 
1790 		if (!poll_cap) {
1791 			int poll_capab_size;
1792 
1793 			rw_downgrade(&dsp->ds_lock);
1794 
1795 			poll_capab_size = sizeof (dl_capability_sub_t) +
1796 			    sizeof (dl_capab_dls_t);
1797 
1798 			mp->b_wptr -= poll_capab_size;
1799 			subsize -= poll_capab_size;
1800 			dlap->dl_sub_length = subsize;
1801 		} else {
1802 			proto_poll_disable(dsp);
1803 
1804 			rw_downgrade(&dsp->ds_lock);
1805 
1806 			dlsp = (dl_capability_sub_t *)ptr;
1807 
1808 			dlsp->dl_cap = DL_CAPAB_POLL;
1809 			dlsp->dl_length = sizeof (dl_capab_dls_t);
1810 			ptr += sizeof (dl_capability_sub_t);
1811 
1812 			bzero(&poll, sizeof (dl_capab_dls_t));
1813 			poll.dls_version = POLL_VERSION_1;
1814 			poll.dls_flags = POLL_CAPABLE;
1815 			poll.dls_tx_handle = (uintptr_t)dsp;
1816 			poll.dls_tx = (uintptr_t)str_mdata_fastpath_put;
1817 
1818 			dlcapabsetqid(&(poll.dls_mid), dsp->ds_rq);
1819 			bcopy(&poll, ptr, sizeof (dl_capab_dls_t));
1820 			ptr += sizeof (dl_capab_dls_t);
1821 		}
1822 	}
1823 
1824 	ASSERT(RW_READ_HELD(&dsp->ds_lock));
1825 
1826 	dlsp = (dl_capability_sub_t *)ptr;
1827 
1828 	dlsp->dl_cap = DL_CAPAB_SOFT_RING;
1829 	dlsp->dl_length = sizeof (dl_capab_dls_t);
1830 	ptr += sizeof (dl_capability_sub_t);
1831 
1832 	bzero(&soft_ring, sizeof (dl_capab_dls_t));
1833 	soft_ring.dls_version = SOFT_RING_VERSION_1;
1834 	soft_ring.dls_flags = SOFT_RING_CAPABLE;
1835 	soft_ring.dls_tx_handle = (uintptr_t)dsp;
1836 	soft_ring.dls_tx = (uintptr_t)str_mdata_fastpath_put;
1837 	soft_ring.dls_ring_change_status =
1838 	    (uintptr_t)proto_change_soft_ring_fanout;
1839 	soft_ring.dls_ring_bind = (uintptr_t)soft_ring_bind;
1840 	soft_ring.dls_ring_unbind = (uintptr_t)soft_ring_unbind;
1841 
1842 	dlcapabsetqid(&(soft_ring.dls_mid), dsp->ds_rq);
1843 	bcopy(&soft_ring, ptr, sizeof (dl_capab_dls_t));
1844 	ptr += sizeof (dl_capab_dls_t);
1845 
1846 	/*
1847 	 * TCP/IP checksum offload.
1848 	 */
1849 	if (cksum != 0) {
1850 		dlsp = (dl_capability_sub_t *)ptr;
1851 
1852 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
1853 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
1854 		ptr += sizeof (dl_capability_sub_t);
1855 
1856 		bzero(&hcksum, sizeof (dl_capab_hcksum_t));
1857 		hcksum.hcksum_version = HCKSUM_VERSION_1;
1858 		hcksum.hcksum_txflags = cksum;
1859 
1860 		dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
1861 		bcopy(&hcksum, ptr, sizeof (dl_capab_hcksum_t));
1862 		ptr += sizeof (dl_capab_hcksum_t);
1863 	}
1864 
1865 	/*
1866 	 * Zero copy
1867 	 */
1868 	if (!(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
1869 		dlsp = (dl_capability_sub_t *)ptr;
1870 
1871 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
1872 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
1873 		ptr += sizeof (dl_capability_sub_t);
1874 
1875 		bzero(&zcopy, sizeof (dl_capab_zerocopy_t));
1876 		zcopy.zerocopy_version = ZEROCOPY_VERSION_1;
1877 		zcopy.zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
1878 
1879 		dlcapabsetqid(&(zcopy.zerocopy_mid), dsp->ds_rq);
1880 		bcopy(&zcopy, ptr, sizeof (dl_capab_zerocopy_t));
1881 		ptr += sizeof (dl_capab_zerocopy_t);
1882 	}
1883 
1884 	ASSERT(ptr == mp->b_rptr + sizeof (dl_capability_ack_t) + subsize);
1885 
1886 	rw_exit(&dsp->ds_lock);
1887 	qreply(q, mp);
1888 	return (B_TRUE);
1889 }
1890