xref: /titanic_41/usr/src/uts/common/io/dcopy.c (revision b798e010bf650d3252fd767dff39df1ccc12d285)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * dcopy.c
29  *    dcopy misc module
30  */
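
/*
 * Consumer-side usage sketch (illustrative only; the command layout and
 * flag definitions live in sys/dcopy.h):
 *
 *	dcopy_handle_t chan;
 *	dcopy_cmd_t cmd;
 *
 *	if (dcopy_alloc(DCOPY_SLEEP, &chan) != DCOPY_SUCCESS)
 *		return;				(no DMA devices registered)
 *	if (dcopy_cmd_alloc(chan, DCOPY_SLEEP, &cmd) == DCOPY_SUCCESS) {
 *		cmd->dp_cmd = DCOPY_CMD_COPY;
 *		cmd->dp_flags = DCOPY_CMD_INTR;	(allows a blocking poll)
 *		... fill in cmd->dp.copy (source, dest, cc_size) ...
 *		if (dcopy_cmd_post(cmd) == DCOPY_SUCCESS)
 *			(void) dcopy_cmd_poll(cmd, DCOPY_POLL_BLOCK);
 *		dcopy_cmd_free(&cmd);
 *	}
 *	dcopy_free(&chan);
 */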
31 
32 #include <sys/conf.h>
33 #include <sys/kmem.h>
34 #include <sys/ddi.h>
35 #include <sys/sunddi.h>
36 #include <sys/modctl.h>
37 #include <sys/sysmacros.h>
38 #include <sys/atomic.h>
39 
40 
41 #include <sys/dcopy.h>
42 #include <sys/dcopy_device.h>
43 
44 
45 /* Number of entries per channel to allocate */
46 uint_t dcopy_channel_size = 1024;
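
/*
 * As a global (non-static) variable, this can typically be overridden at
 * boot via /etc/system, e.g. "set dcopy:dcopy_channel_size = 2048",
 * provided the backing DMA driver accepts the requested size in its
 * cb_channel_alloc() routine.
 */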
47 
48 
49 typedef struct dcopy_list_s {
50 	list_t			dl_list;
51 	kmutex_t		dl_mutex;
52 	uint_t			dl_cnt; /* num entries on list */
53 } dcopy_list_t;
54 
55 /* device state for register/unregister */
56 struct dcopy_device_s {
57 	/* DMA device driver's private pointer */
58 	void			*dc_device_private;
59 
60 	/* to track list of channels from this DMA device */
61 	dcopy_list_t		dc_devchan_list;
62 	list_node_t		dc_device_list_node;
63 
64 	/*
65 	 * dc_removing_cnt tracks how many channels still have to be freed up
66 	 * before it's safe to allow the DMA device driver to detach.
67 	 */
68 	uint_t			dc_removing_cnt;
69 	dcopy_device_cb_t	*dc_cb;
70 
71 	dcopy_device_info_t	dc_info;
72 
73 };
74 
75 typedef struct dcopy_stats_s {
76 	kstat_named_t	cs_bytes_xfer;
77 	kstat_named_t	cs_cmd_alloc;
78 	kstat_named_t	cs_cmd_post;
79 	kstat_named_t	cs_cmd_poll;
80 	kstat_named_t	cs_notify_poll;
81 	kstat_named_t	cs_notify_pending;
82 	kstat_named_t	cs_id;
83 	kstat_named_t	cs_capabilities;
84 } dcopy_stats_t;
85 
86 /* DMA channel state */
87 struct dcopy_channel_s {
88 	/* DMA device driver's per-channel private pointer */
89 	void			*ch_channel_private;
90 
91 	/* shortcut to device callbacks */
92 	dcopy_device_cb_t	*ch_cb;
93 
94 	/*
95 	 * number of outstanding allocs for this channel. used to track when
96 	 * it's safe to free up this channel so the DMA device driver can
97 	 * detach.
98 	 */
99 	uint64_t		ch_ref_cnt;
100 
101 	/* set if the channel must be removed when ch_ref_cnt drops to 0 */
102 	boolean_t		ch_removing;
103 
104 	list_node_t		ch_devchan_list_node;
105 	list_node_t		ch_globalchan_list_node;
106 
107 	/*
108 	 * per-channel list of commands actively blocked waiting for
109 	 * completion.
110 	 */
111 	dcopy_list_t		ch_poll_list;
112 
113 	/* pointer back to our device */
114 	struct dcopy_device_s	*ch_device;
115 
116 	dcopy_query_channel_t	ch_info;
117 
118 	kstat_t			*ch_kstat;
119 	dcopy_stats_t		ch_stat;
120 };
121 
122 /*
123  * If grabbing both the device_list mutex and the globalchan_list mutex,
124  * always grab the globalchan_list mutex before the device_list mutex.
125  */
126 typedef struct dcopy_state_s {
127 	dcopy_list_t		d_device_list;
128 	dcopy_list_t		d_globalchan_list;
129 } dcopy_state_t;
130 dcopy_state_t *dcopy_statep;
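
/*
 * The globalchan-before-device lock ordering noted above is followed in
 * dcopy_device_register() below, which takes d_globalchan_list.dl_mutex
 * first and d_device_list.dl_mutex second when publishing a new device's
 * channels.
 */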
131 
132 
133 /* Module Driver Info */
134 static struct modlmisc dcopy_modlmisc = {
135 	&mod_miscops,
136 	"dcopy kernel module"
137 };
138 
139 /* Module Linkage */
140 static struct modlinkage dcopy_modlinkage = {
141 	MODREV_1,
142 	&dcopy_modlmisc,
143 	NULL
144 };
145 
146 static int dcopy_init();
147 static void dcopy_fini();
148 
149 static int dcopy_list_init(dcopy_list_t *list, size_t node_size,
150     offset_t link_offset);
151 static void dcopy_list_fini(dcopy_list_t *list);
152 static void dcopy_list_push(dcopy_list_t *list, void *list_node);
153 static void *dcopy_list_pop(dcopy_list_t *list);
154 
155 static void dcopy_device_cleanup(dcopy_device_handle_t device,
156     boolean_t do_callback);
157 
158 static int dcopy_stats_init(dcopy_handle_t channel);
159 static void dcopy_stats_fini(dcopy_handle_t channel);
160 
161 
162 /*
163  * _init()
164  */
165 int
166 _init()
167 {
168 	int e;
169 
170 	e = dcopy_init();
171 	if (e != 0) {
172 		return (e);
173 	}
174 
175 	return (mod_install(&dcopy_modlinkage));
176 }
177 
178 
179 /*
180  * _info()
181  */
182 int
183 _info(struct modinfo *modinfop)
184 {
185 	return (mod_info(&dcopy_modlinkage, modinfop));
186 }
187 
188 
189 /*
190  * _fini()
191  */
192 int
193 _fini()
194 {
195 	int e;
196 
197 	e = mod_remove(&dcopy_modlinkage);
198 	if (e != 0) {
199 		return (e);
200 	}
201 
202 	dcopy_fini();
203 
204 	return (e);
205 }
206 
207 /*
208  * dcopy_init()
209  */
210 static int
211 dcopy_init()
212 {
213 	int e;
214 
215 
216 	dcopy_statep = kmem_zalloc(sizeof (*dcopy_statep), KM_SLEEP);
217 
218 	/* Initialize the list we use to track device register/unregister */
219 	e = dcopy_list_init(&dcopy_statep->d_device_list,
220 	    sizeof (struct dcopy_device_s),
221 	    offsetof(struct dcopy_device_s, dc_device_list_node));
222 	if (e != DCOPY_SUCCESS) {
223 		goto dcopyinitfail_device;
224 	}
225 
226 	/* Initialize the list we use to track all DMA channels */
227 	e = dcopy_list_init(&dcopy_statep->d_globalchan_list,
228 	    sizeof (struct dcopy_channel_s),
229 	    offsetof(struct dcopy_channel_s, ch_globalchan_list_node));
230 	if (e != DCOPY_SUCCESS) {
231 		goto dcopyinitfail_global;
232 	}
233 
234 	return (0);
235 
236 dcopyinitfail_cback:
237 	dcopy_list_fini(&dcopy_statep->d_globalchan_list);
238 dcopyinitfail_global:
239 	dcopy_list_fini(&dcopy_statep->d_device_list);
240 dcopyinitfail_device:
241 	kmem_free(dcopy_statep, sizeof (*dcopy_statep));
242 
243 	return (-1);
244 }
245 
246 
247 /*
248  * dcopy_fini()
249  */
250 static void
251 dcopy_fini()
252 {
253 	/*
254 	 * if mod_remove was successful, we shouldn't have any
255 	 * devices/channels to worry about.
256 	 */
257 	ASSERT(list_head(&dcopy_statep->d_globalchan_list.dl_list) == NULL);
258 	ASSERT(list_head(&dcopy_statep->d_device_list.dl_list) == NULL);
259 
260 	dcopy_list_fini(&dcopy_statep->d_globalchan_list);
261 	dcopy_list_fini(&dcopy_statep->d_device_list);
262 	kmem_free(dcopy_statep, sizeof (*dcopy_statep));
263 }
264 
265 
266 /* *** EXTERNAL INTERFACE *** */
267 /*
268  * dcopy_query()
269  */
270 void
271 dcopy_query(dcopy_query_t *query)
272 {
273 	query->dq_version = DCOPY_QUERY_V0;
274 	query->dq_num_channels = dcopy_statep->d_globalchan_list.dl_cnt;
275 }
276 
277 
278 /*
279  * dcopy_alloc()
280  */
281 /*ARGSUSED*/
282 int
283 dcopy_alloc(int flags, dcopy_handle_t *handle)
284 {
285 	dcopy_handle_t channel;
286 	dcopy_list_t *list;
287 
288 
289 	/*
290 	 * we don't use the dcopy_list_* code here because we need to do
291 	 * some non-standard list manipulation.
292 	 */
293 
294 	list = &dcopy_statep->d_globalchan_list;
295 
296 	/*
297 	 * if nothing is on the channel list, return DCOPY_NORESOURCES. This
298 	 * can happen if there aren't any DMA devices registered.
299 	 */
300 	mutex_enter(&list->dl_mutex);
301 	channel = list_head(&list->dl_list);
302 	if (channel == NULL) {
303 		mutex_exit(&list->dl_mutex);
304 		return (DCOPY_NORESOURCES);
305 	}
306 
307 	/*
308 	 * increment the reference count, then pop the channel off the head and
309 	 * push it on the tail. This ensures we rotate through the channels.
310 	 * DMA channels are shared.
311 	 */
312 	channel->ch_ref_cnt++;
313 	list_remove(&list->dl_list, channel);
314 	list_insert_tail(&list->dl_list, channel);
315 	mutex_exit(&list->dl_mutex);
316 
317 	*handle = (dcopy_handle_t)channel;
318 	return (DCOPY_SUCCESS);
319 }
320 
321 
322 /*
323  * dcopy_free()
324  */
325 void
326 dcopy_free(dcopy_handle_t *channel)
327 {
328 	dcopy_device_handle_t device;
329 	dcopy_list_t *list;
330 	boolean_t cleanup = B_FALSE;
331 
332 
333 	ASSERT(*channel != NULL);
334 
335 	/*
336 	 * we don't need to add the channel back to the list since we never
337 	 * removed it. decrement the reference count.
338 	 */
339 	list = &dcopy_statep->d_globalchan_list;
340 	mutex_enter(&list->dl_mutex);
341 	(*channel)->ch_ref_cnt--;
342 
343 	/*
344 	 * if we need to remove this channel, and the reference count is down
345 	 * to 0, decrement the number of channels which still need to be
346 	 * removed on the device.
347 	 */
348 	if ((*channel)->ch_removing && ((*channel)->ch_ref_cnt == 0)) {
349 		device = (*channel)->ch_device;
350 		mutex_enter(&device->dc_devchan_list.dl_mutex);
351 		device->dc_removing_cnt--;
352 		if (device->dc_removing_cnt == 0) {
353 			cleanup = B_TRUE;
354 		}
355 		mutex_exit(&device->dc_devchan_list.dl_mutex);
356 	}
357 	mutex_exit(&list->dl_mutex);
358 
359 	/*
360 	 * if there are no channels which still need to be removed, clean up
361 	 * the device state and call back into the DMA device driver to tell
362 	 * it the device is free.
363 	 */
364 	if (cleanup) {
365 		dcopy_device_cleanup(device, B_TRUE);
366 	}
367 
368 	*channel = NULL;
369 }
370 
371 
372 /*
373  * dcopy_query_channel()
374  */
375 void
376 dcopy_query_channel(dcopy_handle_t channel, dcopy_query_channel_t *query)
377 {
378 	*query = channel->ch_info;
379 }
380 
381 
382 /*
383  * dcopy_cmd_alloc()
384  */
385 int
386 dcopy_cmd_alloc(dcopy_handle_t handle, int flags, dcopy_cmd_t *cmd)
387 {
388 	dcopy_handle_t channel;
389 	dcopy_cmd_priv_t priv;
390 	int e;
391 
392 
393 	channel = handle;
394 
395 	atomic_inc_64(&channel->ch_stat.cs_cmd_alloc.value.ui64);
396 	e = channel->ch_cb->cb_cmd_alloc(channel->ch_channel_private, flags,
397 	    cmd);
398 	if (e == DCOPY_SUCCESS) {
399 		priv = (*cmd)->dp_private;
400 		priv->pr_channel = channel;
401 		/*
402 		 * we won't initialize the blocking state until we actually
403 		 * need to block.
404 		 */
405 		priv->pr_block_init = B_FALSE;
406 	}
407 
408 	return (e);
409 }
410 
411 
412 /*
413  * dcopy_cmd_free()
414  */
415 void
416 dcopy_cmd_free(dcopy_cmd_t *cmd)
417 {
418 	dcopy_handle_t channel;
419 	dcopy_cmd_priv_t priv;
420 
421 
422 	ASSERT(*cmd != NULL);
423 
424 	priv = (*cmd)->dp_private;
425 	channel = priv->pr_channel;
426 
427 	/* if we initialized the blocking state, clean it up too */
428 	if (priv->pr_block_init) {
429 		cv_destroy(&priv->pr_cv);
430 		mutex_destroy(&priv->pr_mutex);
431 	}
432 
433 	channel->ch_cb->cb_cmd_free(channel->ch_channel_private, cmd);
434 }
435 
436 
437 /*
438  * dcopy_cmd_post()
439  */
440 int
441 dcopy_cmd_post(dcopy_cmd_t cmd)
442 {
443 	dcopy_handle_t channel;
444 	int e;
445 
446 
447 	channel = cmd->dp_private->pr_channel;
448 
449 	atomic_inc_64(&channel->ch_stat.cs_cmd_post.value.ui64);
450 	if (cmd->dp_cmd == DCOPY_CMD_COPY) {
451 		atomic_add_64(&channel->ch_stat.cs_bytes_xfer.value.ui64,
452 		    cmd->dp.copy.cc_size);
453 	}
454 	e = channel->ch_cb->cb_cmd_post(channel->ch_channel_private, cmd);
455 	if (e != DCOPY_SUCCESS) {
456 		return (e);
457 	}
458 
459 	return (DCOPY_SUCCESS);
460 }
461 
462 
463 /*
464  * dcopy_cmd_poll()
465  */
466 int
467 dcopy_cmd_poll(dcopy_cmd_t cmd, int flags)
468 {
469 	dcopy_handle_t channel;
470 	dcopy_cmd_priv_t priv;
471 	int e;
472 
473 
474 	priv = cmd->dp_private;
475 	channel = priv->pr_channel;
476 
477 	/*
478 	 * if the caller is trying to block, they must have posted the
479 	 * command with DCOPY_CMD_INTR set.
480 	 */
481 	if ((flags & DCOPY_POLL_BLOCK) && !(cmd->dp_flags & DCOPY_CMD_INTR)) {
482 		return (DCOPY_FAILURE);
483 	}
484 
485 	atomic_inc_64(&channel->ch_stat.cs_cmd_poll.value.ui64);
486 
487 repoll:
488 	e = channel->ch_cb->cb_cmd_poll(channel->ch_channel_private, cmd);
489 	if (e == DCOPY_PENDING) {
490 		/*
491 		 * the command is still active. If the blocking flag is
492 		 * set, block until the command completes.
493 		 */
494 		if (flags & DCOPY_POLL_BLOCK) {
495 
496 			/*
497 			 * if we haven't initialized the state, do it now. A
498 			 * command can be re-used, so it's possible it's
499 			 * already been initialized.
500 			 */
501 			if (!priv->pr_block_init) {
502 				priv->pr_block_init = B_TRUE;
503 				mutex_init(&priv->pr_mutex, NULL, MUTEX_DRIVER,
504 				    NULL);
505 				cv_init(&priv->pr_cv, NULL, CV_DRIVER, NULL);
506 				priv->pr_cmd = cmd;
507 			}
508 
509 			/* push it on the list for blocking commands */
510 			priv->pr_wait = B_TRUE;
511 			dcopy_list_push(&channel->ch_poll_list, priv);
512 
513 			mutex_enter(&priv->pr_mutex);
514 			/*
515 			 * it's possible we already cleared pr_wait before we
516 			 * grabbed the mutex.
517 			 */
518 			if (priv->pr_wait) {
519 				cv_wait(&priv->pr_cv, &priv->pr_mutex);
520 			}
521 			mutex_exit(&priv->pr_mutex);
522 
523 			/*
524 			 * the command has completed, go back and poll so we
525 			 * get the status.
526 			 */
527 			goto repoll;
528 		}
529 	}
530 
531 	return (e);
532 }
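
/*
 * Usage note (illustrative sketch): a caller that intends to block must have
 * posted the command with DCOPY_CMD_INTR set, e.g.
 *
 *	cmd->dp_flags |= DCOPY_CMD_INTR;
 *	(void) dcopy_cmd_post(cmd);
 *	e = dcopy_cmd_poll(cmd, DCOPY_POLL_BLOCK);
 *
 * Without DCOPY_CMD_INTR, a DCOPY_POLL_BLOCK poll fails (see the check at the
 * top of dcopy_cmd_poll()). Non-blocking callers simply omit DCOPY_POLL_BLOCK
 * and retry while DCOPY_PENDING is returned.
 */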
533 
534 /* *** END OF EXTERNAL INTERFACE *** */
535 
536 /*
537  * dcopy_list_init()
538  */
539 static int
540 dcopy_list_init(dcopy_list_t *list, size_t node_size, offset_t link_offset)
541 {
542 	mutex_init(&list->dl_mutex, NULL, MUTEX_DRIVER, NULL);
543 	list_create(&list->dl_list, node_size, link_offset);
544 	list->dl_cnt = 0;
545 
546 	return (DCOPY_SUCCESS);
547 }
548 
549 
550 /*
551  * dcopy_list_fini()
552  */
553 static void
554 dcopy_list_fini(dcopy_list_t *list)
555 {
556 	list_destroy(&list->dl_list);
557 	mutex_destroy(&list->dl_mutex);
558 }
559 
560 
561 /*
562  * dcopy_list_push()
563  */
564 static void
565 dcopy_list_push(dcopy_list_t *list, void *list_node)
566 {
567 	mutex_enter(&list->dl_mutex);
568 	list_insert_tail(&list->dl_list, list_node);
569 	list->dl_cnt++;
570 	mutex_exit(&list->dl_mutex);
571 }
572 
573 
574 /*
575  * dcopy_list_pop()
576  */
577 static void *
578 dcopy_list_pop(dcopy_list_t *list)
579 {
580 	list_node_t *list_node;
581 
582 	mutex_enter(&list->dl_mutex);
583 	list_node = list_head(&list->dl_list);
584 	if (list_node == NULL) {
585 		mutex_exit(&list->dl_mutex);
586 		return (list_node);
587 	}
588 	list->dl_cnt--;
589 	list_remove(&list->dl_list, list_node);
590 	mutex_exit(&list->dl_mutex);
591 
592 	return (list_node);
593 }
594 
595 
596 /* *** DEVICE INTERFACE *** */
597 /*
598  * dcopy_device_register()
599  */
600 int
601 dcopy_device_register(void *device_private, dcopy_device_info_t *info,
602     dcopy_device_handle_t *handle)
603 {
604 	struct dcopy_channel_s *channel;
605 	struct dcopy_device_s *device;
606 	int e;
607 	int i;
608 
609 
610 	/* initialize the per-device state */
611 	device = kmem_zalloc(sizeof (*device), KM_SLEEP);
612 	device->dc_device_private = device_private;
613 	device->dc_info = *info;
614 	device->dc_removing_cnt = 0;
615 	device->dc_cb = info->di_cb;
616 
617 	/*
618 	 * we have a per-device channel list so we can remove a device in the
619 	 * future.
620 	 */
621 	e = dcopy_list_init(&device->dc_devchan_list,
622 	    sizeof (struct dcopy_channel_s),
623 	    offsetof(struct dcopy_channel_s, ch_devchan_list_node));
624 	if (e != DCOPY_SUCCESS) {
625 		goto registerfail_devchan;
626 	}
627 
628 	/*
629 	 * allocate state for each channel, allocate the channel, and then add
630 	 * the device's DMA channels to the device's channel list.
631 	 */
632 	for (i = 0; i < info->di_num_dma; i++) {
633 		channel = kmem_zalloc(sizeof (*channel), KM_SLEEP);
634 		channel->ch_device = device;
635 		channel->ch_removing = B_FALSE;
636 		channel->ch_ref_cnt = 0;
637 		channel->ch_cb = info->di_cb;
638 
639 		e = info->di_cb->cb_channel_alloc(device_private, channel,
640 		    DCOPY_SLEEP, dcopy_channel_size, &channel->ch_info,
641 		    &channel->ch_channel_private);
642 		if (e != DCOPY_SUCCESS) {
643 			kmem_free(channel, sizeof (*channel));
644 			goto registerfail_alloc;
645 		}
646 
647 		e = dcopy_stats_init(channel);
648 		if (e != DCOPY_SUCCESS) {
649 			info->di_cb->cb_channel_free(
650 			    &channel->ch_channel_private);
651 			kmem_free(channel, sizeof (*channel));
652 			goto registerfail_alloc;
653 		}
654 
655 		e = dcopy_list_init(&channel->ch_poll_list,
656 		    sizeof (struct dcopy_cmd_priv_s),
657 		    offsetof(struct dcopy_cmd_priv_s, pr_poll_list_node));
658 		if (e != DCOPY_SUCCESS) {
659 			dcopy_stats_fini(channel);
660 			info->di_cb->cb_channel_free(
661 			    &channel->ch_channel_private);
662 			kmem_free(channel, sizeof (*channel));
663 			goto registerfail_alloc;
664 		}
665 
666 		dcopy_list_push(&device->dc_devchan_list, channel);
667 	}
668 
669 	/* add the device to the device list */
670 	dcopy_list_push(&dcopy_statep->d_device_list, device);
671 
672 	/*
673 	 * add the device's DMA channels to the global channel list (where
674 	 * dcopy_alloc() handles come from)
675 	 */
676 	mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
677 	mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
678 	channel = list_head(&device->dc_devchan_list.dl_list);
679 	while (channel != NULL) {
680 		list_insert_tail(&dcopy_statep->d_globalchan_list.dl_list,
681 		    channel);
682 		dcopy_statep->d_globalchan_list.dl_cnt++;
683 		channel = list_next(&device->dc_devchan_list.dl_list, channel);
684 	}
685 	mutex_exit(&dcopy_statep->d_device_list.dl_mutex);
686 	mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);
687 
688 	*handle = device;
689 
690 	/* finally, call back into the kernel to enable the dcopy KAPI */
691 	uioa_dcopy_enable();
692 
693 	return (DCOPY_SUCCESS);
694 
695 registerfail_alloc:
696 	channel = list_head(&device->dc_devchan_list.dl_list);
697 	while (channel != NULL) {
698 		/* remove from the list */
699 		channel = dcopy_list_pop(&device->dc_devchan_list);
700 		ASSERT(channel != NULL);
701 
702 		dcopy_list_fini(&channel->ch_poll_list);
703 		dcopy_stats_fini(channel);
704 		info->di_cb->cb_channel_free(&channel->ch_channel_private);
705 		kmem_free(channel, sizeof (*channel));
706 	}
707 
708 	dcopy_list_fini(&device->dc_devchan_list);
709 registerfail_devchan:
710 	kmem_free(device, sizeof (*device));
711 
712 	return (DCOPY_FAILURE);
713 }
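
/*
 * Provider-side sketch (illustrative; the structure layouts are defined in
 * sys/dcopy_device.h, and the names my_cbs/my_state/nchan/dip below are
 * placeholders): a DMA engine driver typically registers from attach(9E)
 * along these lines, supplying its callback vector and channel count:
 *
 *	dcopy_device_info_t info;
 *	dcopy_device_handle_t handle;
 *
 *	info.di_dip = dip;
 *	info.di_num_dma = nchan;
 *	info.di_cb = &my_cbs;	(cb_channel_alloc/free, cb_cmd_alloc/free,
 *				cb_cmd_post, cb_cmd_poll,
 *				cb_unregister_complete)
 *	if (dcopy_device_register(my_state, &info, &handle) != DCOPY_SUCCESS)
 *		... fail attach ...
 */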
714 
715 
716 /*
717  * dcopy_device_unregister()
718  */
719 /*ARGSUSED*/
720 int
721 dcopy_device_unregister(dcopy_device_handle_t *handle)
722 {
723 	struct dcopy_channel_s *channel;
724 	dcopy_device_handle_t device;
725 	boolean_t device_busy;
726 
727 	/* first, call back into the kernel to disable the dcopy KAPI */
728 	uioa_dcopy_disable();
729 
730 	device = *handle;
731 	device_busy = B_FALSE;
732 
733 	/*
734 	 * remove the device's DMA channels from the global channel list (where
735 	 * dcopy_alloc() handles come from)
736 	 */
737 	mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
738 	mutex_enter(&device->dc_devchan_list.dl_mutex);
739 	channel = list_head(&device->dc_devchan_list.dl_list);
740 	while (channel != NULL) {
741 		/*
742 		 * if the channel has outstanding allocs, mark it as having
743 		 * to be removed and increment the number of channels which
744 		 * need to be removed in the device state too.
745 		 */
746 		if (channel->ch_ref_cnt != 0) {
747 			channel->ch_removing = B_TRUE;
748 			device_busy = B_TRUE;
749 			device->dc_removing_cnt++;
750 		}
751 		dcopy_statep->d_globalchan_list.dl_cnt--;
752 		list_remove(&dcopy_statep->d_globalchan_list.dl_list, channel);
753 		channel = list_next(&device->dc_devchan_list.dl_list, channel);
754 	}
755 	mutex_exit(&device->dc_devchan_list.dl_mutex);
756 	mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);
757 
758 	/*
759 	 * if there are channels which still need to be removed, we will clean
760 	 * up the device state after they are freed up.
761 	 */
762 	if (device_busy) {
763 		return (DCOPY_PENDING);
764 	}
765 
766 	dcopy_device_cleanup(device, B_FALSE);
767 
768 	*handle = NULL;
769 	return (DCOPY_SUCCESS);
770 }
771 
772 
773 /*
774  * dcopy_device_cleanup()
775  */
776 static void
777 dcopy_device_cleanup(dcopy_device_handle_t device, boolean_t do_callback)
778 {
779 	struct dcopy_channel_s *channel;
780 
781 	/*
782 	 * remove all the channels in the device list, free them, and clean up
783 	 * the state.
784 	 */
785 	mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
786 	channel = list_head(&device->dc_devchan_list.dl_list);
787 	while (channel != NULL) {
788 		device->dc_devchan_list.dl_cnt--;
789 		list_remove(&device->dc_devchan_list.dl_list, channel);
790 		dcopy_list_fini(&channel->ch_poll_list);
791 		dcopy_stats_fini(channel);
792 		channel->ch_cb->cb_channel_free(&channel->ch_channel_private);
793 		kmem_free(channel, sizeof (*channel));
794 		channel = list_head(&device->dc_devchan_list.dl_list);
795 	}
796 
797 	/* remove it from the list of devices */
798 	list_remove(&dcopy_statep->d_device_list.dl_list, device);
799 
800 	mutex_exit(&dcopy_statep->d_device_list.dl_mutex);
801 
802 	/*
803 	 * notify the DMA device driver that the device is free to be
804 	 * detached.
805 	 */
806 	if (do_callback) {
807 		device->dc_cb->cb_unregister_complete(
808 		    device->dc_device_private, DCOPY_SUCCESS);
809 	}
810 
811 	dcopy_list_fini(&device->dc_devchan_list);
812 	kmem_free(device, sizeof (*device));
813 }
814 
815 
816 /*
817  * dcopy_device_channel_notify()
818  */
819 /*ARGSUSED*/
820 void
821 dcopy_device_channel_notify(dcopy_handle_t handle, int status)
822 {
823 	struct dcopy_channel_s *channel;
824 	dcopy_list_t *poll_list;
825 	dcopy_cmd_priv_t priv;
826 	int e;
827 
828 
829 	ASSERT(status == DCOPY_COMPLETION);
830 	channel = handle;
831 
832 	poll_list = &channel->ch_poll_list;
833 
834 	/*
835 	 * when we get a completion notification from the device, go through
836 	 * all of the commands blocking on this channel and see if they have
837 	 * completed. Remove the command and wake up the blocked thread if they
838 	 * have. Once we hit a command which is still pending, we are done
839 	 * polling since commands in a channel complete in order.
840 	 */
841 	mutex_enter(&poll_list->dl_mutex);
842 	if (poll_list->dl_cnt != 0) {
843 		priv = list_head(&poll_list->dl_list);
844 		while (priv != NULL) {
845 			atomic_inc_64(&channel->
846 			    ch_stat.cs_notify_poll.value.ui64);
847 			e = channel->ch_cb->cb_cmd_poll(
848 			    channel->ch_channel_private,
849 			    priv->pr_cmd);
850 			if (e == DCOPY_PENDING) {
851 				atomic_inc_64(&channel->
852 				    ch_stat.cs_notify_pending.value.ui64);
853 				break;
854 			}
855 
856 			poll_list->dl_cnt--;
857 			list_remove(&poll_list->dl_list, priv);
858 
859 			mutex_enter(&priv->pr_mutex);
860 			priv->pr_wait = B_FALSE;
861 			cv_signal(&priv->pr_cv);
862 			mutex_exit(&priv->pr_mutex);
863 
864 			priv = list_head(&poll_list->dl_list);
865 		}
866 	}
867 
868 	mutex_exit(&poll_list->dl_mutex);
869 }
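
/*
 * Note: the registering DMA driver is expected to call
 * dcopy_device_channel_notify(handle, DCOPY_COMPLETION) when it observes a
 * completion on the channel (typically from its interrupt handler); that is
 * what wakes threads blocked in dcopy_cmd_poll() above.
 */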
870 
871 
872 /*
873  * dcopy_stats_init()
874  */
875 static int
876 dcopy_stats_init(dcopy_handle_t channel)
877 {
878 #define	CHANSTRSIZE	20
879 	char chanstr[CHANSTRSIZE];
880 	dcopy_stats_t *stats;
881 	int instance;
882 	char *name;
883 
884 
885 	stats = &channel->ch_stat;
886 	name = (char *)ddi_driver_name(channel->ch_device->dc_info.di_dip);
887 	instance = ddi_get_instance(channel->ch_device->dc_info.di_dip);
888 
889 	(void) snprintf(chanstr, CHANSTRSIZE, "channel%d",
890 	    (uint32_t)channel->ch_info.qc_chan_num);
891 
892 	channel->ch_kstat = kstat_create(name, instance, chanstr, "misc",
893 	    KSTAT_TYPE_NAMED, sizeof (dcopy_stats_t) / sizeof (kstat_named_t),
894 	    KSTAT_FLAG_VIRTUAL);
895 	if (channel->ch_kstat == NULL) {
896 		return (DCOPY_FAILURE);
897 	}
898 	channel->ch_kstat->ks_data = stats;
899 
900 	kstat_named_init(&stats->cs_bytes_xfer, "bytes_xfer",
901 	    KSTAT_DATA_UINT64);
902 	kstat_named_init(&stats->cs_cmd_alloc, "cmd_alloc",
903 	    KSTAT_DATA_UINT64);
904 	kstat_named_init(&stats->cs_cmd_post, "cmd_post",
905 	    KSTAT_DATA_UINT64);
906 	kstat_named_init(&stats->cs_cmd_poll, "cmd_poll",
907 	    KSTAT_DATA_UINT64);
908 	kstat_named_init(&stats->cs_notify_poll, "notify_poll",
909 	    KSTAT_DATA_UINT64);
910 	kstat_named_init(&stats->cs_notify_pending, "notify_pending",
911 	    KSTAT_DATA_UINT64);
912 	kstat_named_init(&stats->cs_id, "id",
913 	    KSTAT_DATA_UINT64);
914 	kstat_named_init(&stats->cs_capabilities, "capabilities",
915 	    KSTAT_DATA_UINT64);
916 
917 	kstat_install(channel->ch_kstat);
918 
919 	channel->ch_stat.cs_id.value.ui64 = channel->ch_info.qc_id;
920 	channel->ch_stat.cs_capabilities.value.ui64 =
921 	    channel->ch_info.qc_capabilities;
922 
923 	return (DCOPY_SUCCESS);
924 }
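
/*
 * The per-channel kstats installed above can be read from userland with
 * kstat(1M), e.g. (assuming the registering DMA driver is "ioat" and this is
 * its first channel):
 *
 *	# kstat -m ioat -n channel0
 */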
925 
926 
927 /*
928  * dcopy_stats_fini()
929  */
930 static void
931 dcopy_stats_fini(dcopy_handle_t channel)
932 {
933 	kstat_delete(channel->ch_kstat);
934 }
935 /* *** END OF DEVICE INTERFACE *** */
936