/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * dcopy.c
 *    dcopy misc module
 */

#include <sys/conf.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/sysmacros.h>
#include <sys/atomic.h>


#include <sys/dcopy.h>
#include <sys/dcopy_device.h>


/* Number of entries per channel to allocate */
uint_t dcopy_channel_size = 1024;
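
/*
 * The channel depth above is a tunable. A sketch of overriding it from
 * /etc/system using the standard "set module:variable" syntax; the value
 * shown is only an example, not a recommendation:
 *
 *	set dcopy:dcopy_channel_size = 2048
 */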


typedef struct dcopy_list_s {
	list_t		dl_list;
	kmutex_t	dl_mutex;
	uint_t		dl_cnt; /* number of entries on the list */
} dcopy_list_t;

/* device state for register/unregister */
struct dcopy_device_s {
	/* DMA device driver's private pointer */
	void		*dc_device_private;

	/* to track the list of channels from this DMA device */
	dcopy_list_t	dc_devchan_list;
	list_node_t	dc_device_list_node;

	/*
	 * dc_removing_cnt tracks how many channels still have to be freed up
	 * before it's safe to allow the DMA device driver to detach.
	 */
	uint_t		dc_removing_cnt;
	dcopy_device_cb_t *dc_cb;

	dcopy_device_info_t dc_info;

};

typedef struct dcopy_stats_s {
	kstat_named_t	cs_bytes_xfer;
	kstat_named_t	cs_cmd_alloc;
	kstat_named_t	cs_cmd_post;
	kstat_named_t	cs_cmd_poll;
	kstat_named_t	cs_notify_poll;
	kstat_named_t	cs_notify_pending;
	kstat_named_t	cs_id;
	kstat_named_t	cs_capabilities;
} dcopy_stats_t;

/* DMA channel state */
struct dcopy_channel_s {
	/* DMA driver channel private pointer */
	void		*ch_channel_private;

	/* shortcut to device callbacks */
	dcopy_device_cb_t *ch_cb;

	/*
	 * Number of outstanding allocs for this channel. Used to track when
	 * it's safe to free up this channel so the DMA device driver can
	 * detach.
	 */
	uint64_t	ch_ref_cnt;

	/* whether the channel needs to be removed when ch_ref_cnt reaches 0 */
	boolean_t	ch_removing;

	list_node_t	ch_devchan_list_node;
	list_node_t	ch_globalchan_list_node;

	/*
	 * per-channel list of commands actively blocked waiting for
	 * completion.
	 */
	dcopy_list_t	ch_poll_list;

	/* pointer back to our device */
	struct dcopy_device_s *ch_device;

	dcopy_query_channel_t ch_info;

	kstat_t		*ch_kstat;
	dcopy_stats_t	ch_stat;
};

/*
 * If grabbing both the device_list mutex and the globalchan_list mutex,
 * always grab the globalchan_list mutex before the device_list mutex.
 */
typedef struct dcopy_state_s {
	dcopy_list_t	d_device_list;
	dcopy_list_t	d_globalchan_list;
} dcopy_state_t;
dcopy_state_t *dcopy_statep;


/* Module Driver Info */
static struct modlmisc dcopy_modlmisc = {
	&mod_miscops,
	"dcopy kernel module"
};

/* Module Linkage */
static struct modlinkage dcopy_modlinkage = {
	MODREV_1,
	&dcopy_modlmisc,
	NULL
};

static int dcopy_init();
static void dcopy_fini();

static int dcopy_list_init(dcopy_list_t *list, size_t node_size,
    offset_t link_offset);
static void dcopy_list_fini(dcopy_list_t *list);
static void dcopy_list_push(dcopy_list_t *list, void *list_node);
static void *dcopy_list_pop(dcopy_list_t *list);

static void dcopy_device_cleanup(dcopy_device_handle_t device,
    boolean_t do_callback);

static int dcopy_stats_init(dcopy_handle_t channel);
static void dcopy_stats_fini(dcopy_handle_t channel);


/*
 * _init()
 */
int
_init()
{
	int e;

	e = dcopy_init();
	if (e != 0) {
		return (e);
	}

	return (mod_install(&dcopy_modlinkage));
}


/*
 * _info()
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&dcopy_modlinkage, modinfop));
}


/*
 * _fini()
 */
int
_fini()
{
	int e;

	e = mod_remove(&dcopy_modlinkage);
	if (e != 0) {
		return (e);
	}

	dcopy_fini();

	return (e);
}

/*
 * dcopy_init()
 */
static int
dcopy_init()
{
	int e;


	dcopy_statep = kmem_zalloc(sizeof (*dcopy_statep), KM_SLEEP);

	/* Initialize the list we use to track device register/unregister */
	e = dcopy_list_init(&dcopy_statep->d_device_list,
	    sizeof (struct dcopy_device_s),
	    offsetof(struct dcopy_device_s, dc_device_list_node));
	if (e != DCOPY_SUCCESS) {
		goto dcopyinitfail_device;
	}

	/* Initialize the list we use to track all DMA channels */
	e = dcopy_list_init(&dcopy_statep->d_globalchan_list,
	    sizeof (struct dcopy_channel_s),
	    offsetof(struct dcopy_channel_s, ch_globalchan_list_node));
	if (e != DCOPY_SUCCESS) {
		goto dcopyinitfail_global;
	}

	return (0);

dcopyinitfail_global:
	dcopy_list_fini(&dcopy_statep->d_device_list);
dcopyinitfail_device:
	kmem_free(dcopy_statep, sizeof (*dcopy_statep));

	return (-1);
}


/*
 * dcopy_fini()
 */
static void
dcopy_fini()
{
	/*
	 * if mod_remove was successful, we shouldn't have any
	 * devices/channels to worry about.
	 */
	ASSERT(list_head(&dcopy_statep->d_globalchan_list.dl_list) == NULL);
	ASSERT(list_head(&dcopy_statep->d_device_list.dl_list) == NULL);

	dcopy_list_fini(&dcopy_statep->d_globalchan_list);
	dcopy_list_fini(&dcopy_statep->d_device_list);
	kmem_free(dcopy_statep, sizeof (*dcopy_statep));
}


/* *** EXTERNAL INTERFACE *** */
/*
 * dcopy_query()
 */
void
dcopy_query(dcopy_query_t *query)
{
	query->dq_version = DCOPY_QUERY_V0;
	query->dq_num_channels = dcopy_statep->d_globalchan_list.dl_cnt;
}


/*
 * dcopy_alloc()
 */
/*ARGSUSED*/
int
dcopy_alloc(int flags, dcopy_handle_t *handle)
{
	dcopy_handle_t channel;
	dcopy_list_t *list;


	/*
	 * we don't use the dcopy_list_* code here because we need to do
	 * some non-standard stuff.
	 */

	list = &dcopy_statep->d_globalchan_list;

	/*
	 * if nothing is on the channel list, return DCOPY_NORESOURCES. This
	 * can happen if there aren't any DMA devices registered.
	 */
	mutex_enter(&list->dl_mutex);
	channel = list_head(&list->dl_list);
	if (channel == NULL) {
		mutex_exit(&list->dl_mutex);
		return (DCOPY_NORESOURCES);
	}

	/*
	 * increment the reference count, and pop the channel off the head and
	 * push it on the tail. This ensures we rotate through the channels.
	 * DMA channels are shared.
	 */
	channel->ch_ref_cnt++;
	list_remove(&list->dl_list, channel);
	list_insert_tail(&list->dl_list, channel);
	mutex_exit(&list->dl_mutex);

	*handle = (dcopy_handle_t)channel;
	return (DCOPY_SUCCESS);
}
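
/*
 * Consumer-side usage sketch (illustrative only, not part of this module).
 * A kernel client checks for registered DMA engines with dcopy_query(),
 * grabs a shared channel with dcopy_alloc(), and hands it back with
 * dcopy_free() when done. The DCOPY_SLEEP flag and the error returns are
 * assumptions about a typical caller, not code taken from this file:
 *
 *	dcopy_query_t q;
 *	dcopy_handle_t chan;
 *
 *	dcopy_query(&q);
 *	if (q.dq_num_channels == 0)
 *		return (ENODEV);
 *	if (dcopy_alloc(DCOPY_SLEEP, &chan) != DCOPY_SUCCESS)
 *		return (ENOMEM);
 *
 *	(allocate, post, and poll commands on "chan" here)
 *
 *	dcopy_free(&chan);
 */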


/*
 * dcopy_free()
 */
void
dcopy_free(dcopy_handle_t *channel)
{
	dcopy_device_handle_t device;
	dcopy_list_t *list;
	boolean_t cleanup = B_FALSE;


	ASSERT(*channel != NULL);

	/*
	 * we don't need to add the channel back to the list since we never
	 * removed it. decrement the reference count.
	 */
	list = &dcopy_statep->d_globalchan_list;
	mutex_enter(&list->dl_mutex);
	(*channel)->ch_ref_cnt--;

	/*
	 * if we need to remove this channel, and the reference count is down
	 * to 0, decrement the number of channels which still need to be
	 * removed on the device.
	 */
	if ((*channel)->ch_removing && ((*channel)->ch_ref_cnt == 0)) {
		device = (*channel)->ch_device;
		mutex_enter(&device->dc_devchan_list.dl_mutex);
		device->dc_removing_cnt--;
		if (device->dc_removing_cnt == 0) {
			cleanup = B_TRUE;
		}
		mutex_exit(&device->dc_devchan_list.dl_mutex);
	}
	mutex_exit(&list->dl_mutex);

	/*
	 * if there are no channels which still need to be removed, clean up
	 * the device state and call back into the DMA device driver to tell
	 * it the device is free.
	 */
	if (cleanup) {
		dcopy_device_cleanup(device, B_TRUE);
	}

	*channel = NULL;
}


/*
 * dcopy_query_channel()
 */
void
dcopy_query_channel(dcopy_handle_t channel, dcopy_query_channel_t *query)
{
	*query = channel->ch_info;
}


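/*
 * Channel-query sketch (illustrative only): after dcopy_alloc(), a consumer
 * can look at the per-channel attributes before deciding how to use the
 * channel. Only qc_id, qc_capabilities, and qc_chan_num are known from this
 * file; the field types and the meaning of the capability bits are
 * assumptions left to dcopy.h:
 *
 *	dcopy_query_channel_t qc;
 *
 *	dcopy_query_channel(chan, &qc);
 *	cmn_err(CE_CONT, "?dcopy channel %u: id 0x%llx caps 0x%llx\n",
 *	    (uint_t)qc.qc_chan_num, (u_longlong_t)qc.qc_id,
 *	    (u_longlong_t)qc.qc_capabilities);
 */
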
/*
 * dcopy_cmd_alloc()
 */
int
dcopy_cmd_alloc(dcopy_handle_t handle, int flags, dcopy_cmd_t *cmd)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;
	int e;


	channel = handle;

	atomic_inc_64(&channel->ch_stat.cs_cmd_alloc.value.ui64);
	e = channel->ch_cb->cb_cmd_alloc(channel->ch_channel_private, flags,
	    cmd);
	if (e == DCOPY_SUCCESS) {
		priv = (*cmd)->dp_private;
		priv->pr_channel = channel;
		/*
		 * we won't initialize the blocking state until we actually
		 * need to block.
		 */
		priv->pr_block_init = B_FALSE;
	}

	return (e);
}


/*
 * dcopy_cmd_free()
 */
void
dcopy_cmd_free(dcopy_cmd_t *cmd)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;


	ASSERT(*cmd != NULL);

	priv = (*cmd)->dp_private;
	channel = priv->pr_channel;

	/* if we initialized the blocking state, clean it up too */
	if (priv->pr_block_init) {
		cv_destroy(&priv->pr_cv);
		mutex_destroy(&priv->pr_mutex);
	}

	channel->ch_cb->cb_cmd_free(channel->ch_channel_private, cmd);
}


/*
 * dcopy_cmd_post()
 */
int
dcopy_cmd_post(dcopy_cmd_t cmd)
{
	dcopy_handle_t channel;
	int e;


	channel = cmd->dp_private->pr_channel;

	atomic_inc_64(&channel->ch_stat.cs_cmd_post.value.ui64);
	if (cmd->dp_cmd == DCOPY_CMD_COPY) {
		atomic_add_64(&channel->ch_stat.cs_bytes_xfer.value.ui64,
		    cmd->dp.copy.cc_size);
	}
	e = channel->ch_cb->cb_cmd_post(channel->ch_channel_private, cmd);
	if (e != DCOPY_SUCCESS) {
		return (e);
	}

	return (DCOPY_SUCCESS);
}


/*
 * dcopy_cmd_poll()
 */
int
dcopy_cmd_poll(dcopy_cmd_t cmd, int flags)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;
	int e;


	priv = cmd->dp_private;
	channel = priv->pr_channel;

	/*
	 * if the caller is trying to block, they need to have posted the
	 * command with DCOPY_CMD_INTR set.
	 */
	if ((flags & DCOPY_POLL_BLOCK) && !(cmd->dp_flags & DCOPY_CMD_INTR)) {
		return (DCOPY_FAILURE);
	}

	atomic_inc_64(&channel->ch_stat.cs_cmd_poll.value.ui64);

repoll:
	e = channel->ch_cb->cb_cmd_poll(channel->ch_channel_private, cmd);
	if (e == DCOPY_PENDING) {
		/*
		 * the command is still active; block if the blocking
		 * flag is set.
		 */
		if (flags & DCOPY_POLL_BLOCK) {

			/*
			 * if we haven't initialized the state, do it now. A
			 * command can be re-used, so it's possible it's
			 * already been initialized.
			 */
			if (!priv->pr_block_init) {
				priv->pr_block_init = B_TRUE;
				mutex_init(&priv->pr_mutex, NULL, MUTEX_DRIVER,
				    NULL);
				cv_init(&priv->pr_cv, NULL, CV_DRIVER, NULL);
				priv->pr_cmd = cmd;
			}

			/* push it on the list for blocking commands */
			priv->pr_wait = B_TRUE;
			dcopy_list_push(&channel->ch_poll_list, priv);

			mutex_enter(&priv->pr_mutex);
			/*
			 * it's possible we already cleared pr_wait before we
			 * grabbed the mutex.
			 */
			if (priv->pr_wait) {
				cv_wait(&priv->pr_cv, &priv->pr_mutex);
			}
			mutex_exit(&priv->pr_mutex);

			/*
			 * the command has completed, go back and poll so we
			 * get the status.
			 */
			goto repoll;
		}
	}

	return (e);
}
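
/*
 * Command lifecycle sketch (illustrative only, not part of this module).
 * After grabbing a channel with dcopy_alloc(), a consumer allocates a
 * command, fills in a copy request, posts it, and polls for completion.
 * Posting with DCOPY_CMD_INTR is what allows a blocking DCOPY_POLL_BLOCK
 * poll above. The cc_source/cc_dest fields, the DCOPY_SLEEP flag, and the
 * src_pa/dst_pa/len variables are assumptions about the public dcopy_cmd_t
 * usage, not taken from this file:
 *
 *	dcopy_cmd_t cmd;
 *
 *	if (dcopy_cmd_alloc(chan, DCOPY_SLEEP, &cmd) != DCOPY_SUCCESS)
 *		return (ENOMEM);
 *
 *	cmd->dp_cmd = DCOPY_CMD_COPY;
 *	cmd->dp_flags = DCOPY_CMD_INTR;
 *	cmd->dp.copy.cc_source = src_pa;	(physical source address)
 *	cmd->dp.copy.cc_dest = dst_pa;		(physical destination address)
 *	cmd->dp.copy.cc_size = len;
 *
 *	if (dcopy_cmd_post(cmd) == DCOPY_SUCCESS)
 *		(void) dcopy_cmd_poll(cmd, DCOPY_POLL_BLOCK);
 *
 *	dcopy_cmd_free(&cmd);
 */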

/* *** END OF EXTERNAL INTERFACE *** */

/*
 * dcopy_list_init()
 */
static int
dcopy_list_init(dcopy_list_t *list, size_t node_size, offset_t link_offset)
{
	mutex_init(&list->dl_mutex, NULL, MUTEX_DRIVER, NULL);
	list_create(&list->dl_list, node_size, link_offset);
	list->dl_cnt = 0;

	return (DCOPY_SUCCESS);
}


/*
 * dcopy_list_fini()
 */
static void
dcopy_list_fini(dcopy_list_t *list)
{
	list_destroy(&list->dl_list);
	mutex_destroy(&list->dl_mutex);
}


/*
 * dcopy_list_push()
 */
static void
dcopy_list_push(dcopy_list_t *list, void *list_node)
{
	mutex_enter(&list->dl_mutex);
	list_insert_tail(&list->dl_list, list_node);
	list->dl_cnt++;
	mutex_exit(&list->dl_mutex);
}


/*
 * dcopy_list_pop()
 */
static void *
dcopy_list_pop(dcopy_list_t *list)
{
	list_node_t *list_node;

	mutex_enter(&list->dl_mutex);
	list_node = list_head(&list->dl_list);
	if (list_node == NULL) {
		mutex_exit(&list->dl_mutex);
		return (list_node);
	}
	list->dl_cnt--;
	list_remove(&list->dl_list, list_node);
	mutex_exit(&list->dl_mutex);

	return (list_node);
}


/* *** DEVICE INTERFACE *** */
/*
 * dcopy_device_register()
 */
int
dcopy_device_register(void *device_private, dcopy_device_info_t *info,
    dcopy_device_handle_t *handle)
{
	struct dcopy_channel_s *channel;
	struct dcopy_device_s *device;
	int e;
	int i;


	/* initialize the per device state */
	device = kmem_zalloc(sizeof (*device), KM_SLEEP);
	device->dc_device_private = device_private;
	device->dc_info = *info;
	device->dc_removing_cnt = 0;
	device->dc_cb = info->di_cb;

	/*
	 * we have a per-device channel list so we can remove a device in the
	 * future.
	 */
	e = dcopy_list_init(&device->dc_devchan_list,
	    sizeof (struct dcopy_channel_s),
	    offsetof(struct dcopy_channel_s, ch_devchan_list_node));
	if (e != DCOPY_SUCCESS) {
		goto registerfail_devchan;
	}

	/*
	 * allocate state for each channel, allocate the channel, and then add
	 * the device's DMA channels to the device's channel list.
	 */
	for (i = 0; i < info->di_num_dma; i++) {
		channel = kmem_zalloc(sizeof (*channel), KM_SLEEP);
		channel->ch_device = device;
		channel->ch_removing = B_FALSE;
		channel->ch_ref_cnt = 0;
		channel->ch_cb = info->di_cb;

		e = info->di_cb->cb_channel_alloc(device_private, channel,
		    DCOPY_SLEEP, dcopy_channel_size, &channel->ch_info,
		    &channel->ch_channel_private);
		if (e != DCOPY_SUCCESS) {
			kmem_free(channel, sizeof (*channel));
			goto registerfail_alloc;
		}

		e = dcopy_stats_init(channel);
		if (e != DCOPY_SUCCESS) {
			info->di_cb->cb_channel_free(
			    &channel->ch_channel_private);
			kmem_free(channel, sizeof (*channel));
			goto registerfail_alloc;
		}

		e = dcopy_list_init(&channel->ch_poll_list,
		    sizeof (struct dcopy_cmd_priv_s),
		    offsetof(struct dcopy_cmd_priv_s, pr_poll_list_node));
		if (e != DCOPY_SUCCESS) {
			dcopy_stats_fini(channel);
			info->di_cb->cb_channel_free(
			    &channel->ch_channel_private);
			kmem_free(channel, sizeof (*channel));
			goto registerfail_alloc;
		}

		dcopy_list_push(&device->dc_devchan_list, channel);
	}

	/* add the device to the device list */
	dcopy_list_push(&dcopy_statep->d_device_list, device);

	/*
	 * add the device's DMA channels to the global channel list (where
	 * dcopy_alloc()s come from)
	 */
	mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
	mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		list_insert_tail(&dcopy_statep->d_globalchan_list.dl_list,
		    channel);
		dcopy_statep->d_globalchan_list.dl_cnt++;
		channel = list_next(&device->dc_devchan_list.dl_list, channel);
	}
	mutex_exit(&dcopy_statep->d_device_list.dl_mutex);
	mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);

	*handle = device;

	/* last call-back into the kernel to enable the dcopy KAPI */
	uioa_dcopy_enable();

	return (DCOPY_SUCCESS);

registerfail_alloc:
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		/* remove from the list */
		channel = dcopy_list_pop(&device->dc_devchan_list);
		ASSERT(channel != NULL);

		dcopy_list_fini(&channel->ch_poll_list);
		dcopy_stats_fini(channel);
		info->di_cb->cb_channel_free(&channel->ch_channel_private);
		kmem_free(channel, sizeof (*channel));

		/* re-fetch the head; the freed pointer must not be reused */
		channel = list_head(&device->dc_devchan_list.dl_list);
	}

	dcopy_list_fini(&device->dc_devchan_list);
registerfail_devchan:
	kmem_free(device, sizeof (*device));

	return (DCOPY_FAILURE);
}

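/*
 * Provider-side registration sketch (illustrative only). A DMA engine
 * driver, from its attach(9E) entry point, fills in a dcopy_device_info_t
 * describing its channels and callback vector and hands it to
 * dcopy_device_register(). The surrounding driver code is an assumption
 * based only on the fields this file uses (di_dip, di_num_dma, di_cb, and
 * the cb_* entry points):
 *
 *	static dcopy_device_cb_t mydrv_cb = {
 *		(channel and command callbacks: cb_channel_alloc,
 *		cb_channel_free, cb_cmd_alloc, cb_cmd_free, cb_cmd_post,
 *		cb_cmd_poll, cb_unregister_complete, in the order defined
 *		by dcopy_device.h)
 *	};
 *
 *	dcopy_device_info_t info;
 *	dcopy_device_handle_t dcopy_hdl;
 *
 *	info.di_dip = dip;
 *	info.di_num_dma = num_channels;
 *	info.di_cb = &mydrv_cb;
 *	if (dcopy_device_register(mydrv_state, &info, &dcopy_hdl) !=
 *	    DCOPY_SUCCESS)
 *		return (DDI_FAILURE);
 */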

/*
 * dcopy_device_unregister()
 */
/*ARGSUSED*/
int
dcopy_device_unregister(dcopy_device_handle_t *handle)
{
	struct dcopy_channel_s *channel;
	dcopy_device_handle_t device;
	boolean_t device_busy;

	/* first call-back into the kernel to disable the dcopy KAPI */
	uioa_dcopy_disable();

	device = *handle;
	device_busy = B_FALSE;

	/*
	 * remove the device's DMA channels from the global channel list
	 * (where dcopy_alloc()s come from)
	 */
	mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
	mutex_enter(&device->dc_devchan_list.dl_mutex);
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		/*
		 * if the channel has outstanding allocs, mark it as having
		 * to be removed and increment the number of channels which
		 * need to be removed in the device state too.
		 */
		if (channel->ch_ref_cnt != 0) {
			channel->ch_removing = B_TRUE;
			device_busy = B_TRUE;
			device->dc_removing_cnt++;
		}
		dcopy_statep->d_globalchan_list.dl_cnt--;
		list_remove(&dcopy_statep->d_globalchan_list.dl_list, channel);
		channel = list_next(&device->dc_devchan_list.dl_list, channel);
	}
	mutex_exit(&device->dc_devchan_list.dl_mutex);
	mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);

	/*
	 * if there are channels which still need to be removed, we will clean
	 * up the device state after they are freed up.
	 */
	if (device_busy) {
		return (DCOPY_PENDING);
	}

	dcopy_device_cleanup(device, B_FALSE);

	*handle = NULL;
	return (DCOPY_SUCCESS);
}

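/*
 * Provider-side unregister sketch (illustrative only). In detach(9E) the
 * DMA engine driver asks dcopy to release its channels; if consumers still
 * hold references, DCOPY_PENDING comes back and the driver must fail the
 * detach and rely on its cb_unregister_complete() callback, which fires
 * once the last dcopy_free() drops the reference counts to zero. The
 * surrounding detach logic is an assumption, not code from this file:
 *
 *	switch (dcopy_device_unregister(&dcopy_hdl)) {
 *	case DCOPY_SUCCESS:
 *		break;
 *	case DCOPY_PENDING:
 *		(channels are still allocated; cb_unregister_complete()
 *		will be called later)
 *		return (DDI_FAILURE);
 *	default:
 *		return (DDI_FAILURE);
 *	}
 */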

/*
 * dcopy_device_cleanup()
 */
static void
dcopy_device_cleanup(dcopy_device_handle_t device, boolean_t do_callback)
{
	struct dcopy_channel_s *channel;

	/*
	 * remove all the channels in the device list, free them, and clean up
	 * the state.
	 */
	mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		device->dc_devchan_list.dl_cnt--;
		list_remove(&device->dc_devchan_list.dl_list, channel);
		dcopy_list_fini(&channel->ch_poll_list);
		dcopy_stats_fini(channel);
		channel->ch_cb->cb_channel_free(&channel->ch_channel_private);
		kmem_free(channel, sizeof (*channel));
		channel = list_head(&device->dc_devchan_list.dl_list);
	}

	/* remove it from the list of devices */
	list_remove(&dcopy_statep->d_device_list.dl_list, device);

	mutex_exit(&dcopy_statep->d_device_list.dl_mutex);

	/*
	 * notify the DMA device driver that the device is free to be
	 * detached.
	 */
	if (do_callback) {
		device->dc_cb->cb_unregister_complete(
		    device->dc_device_private, DCOPY_SUCCESS);
	}

	dcopy_list_fini(&device->dc_devchan_list);
	kmem_free(device, sizeof (*device));
}


/*
 * dcopy_device_channel_notify()
 */
/*ARGSUSED*/
void
dcopy_device_channel_notify(dcopy_handle_t handle, int status)
{
	struct dcopy_channel_s *channel;
	dcopy_list_t *poll_list;
	dcopy_cmd_priv_t priv;
	int e;


	ASSERT(status == DCOPY_COMPLETION);
	channel = handle;

	poll_list = &channel->ch_poll_list;

	/*
	 * when we get a completion notification from the device, go through
	 * all of the commands blocking on this channel and see if they have
	 * completed. Remove the command and wake up the blocked thread if it
	 * has. Once we hit a command which is still pending, we are done
	 * polling since commands in a channel complete in order.
	 */
	mutex_enter(&poll_list->dl_mutex);
	if (poll_list->dl_cnt != 0) {
		priv = list_head(&poll_list->dl_list);
		while (priv != NULL) {
			atomic_inc_64(&channel->
			    ch_stat.cs_notify_poll.value.ui64);
			e = channel->ch_cb->cb_cmd_poll(
			    channel->ch_channel_private,
			    priv->pr_cmd);
			if (e == DCOPY_PENDING) {
				atomic_inc_64(&channel->
				    ch_stat.cs_notify_pending.value.ui64);
				break;
			}

			poll_list->dl_cnt--;
			list_remove(&poll_list->dl_list, priv);

			mutex_enter(&priv->pr_mutex);
			priv->pr_wait = B_FALSE;
			cv_signal(&priv->pr_cv);
			mutex_exit(&priv->pr_mutex);

			priv = list_head(&poll_list->dl_list);
		}
	}

	mutex_exit(&poll_list->dl_mutex);
}


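/*
 * Completion-notification sketch (illustrative only). A DMA engine driver
 * would, from its interrupt handler, tell dcopy that the channel made
 * progress so that any threads blocked in dcopy_cmd_poll(..., DCOPY_POLL_BLOCK)
 * above get woken. The interrupt-handler skeleton and the mydrv_chan_t /
 * mc_dcopy_handle names are assumptions, not code from this file:
 *
 *	static uint_t
 *	mydrv_intr(caddr_t arg1, caddr_t arg2)
 *	{
 *		mydrv_chan_t *mc = (mydrv_chan_t *)arg1;
 *
 *		(acknowledge/clear the hardware interrupt here)
 *
 *		dcopy_device_channel_notify(mc->mc_dcopy_handle,
 *		    DCOPY_COMPLETION);
 *		return (DDI_INTR_CLAIMED);
 *	}
 */
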
/*
 * dcopy_stats_init()
 */
static int
dcopy_stats_init(dcopy_handle_t channel)
{
#define	CHANSTRSIZE	20
	char chanstr[CHANSTRSIZE];
	dcopy_stats_t *stats;
	int instance;
	char *name;


	stats = &channel->ch_stat;
	name = (char *)ddi_driver_name(channel->ch_device->dc_info.di_dip);
	instance = ddi_get_instance(channel->ch_device->dc_info.di_dip);

	(void) snprintf(chanstr, CHANSTRSIZE, "channel%d",
	    (uint32_t)channel->ch_info.qc_chan_num);

	channel->ch_kstat = kstat_create(name, instance, chanstr, "misc",
	    KSTAT_TYPE_NAMED, sizeof (dcopy_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (channel->ch_kstat == NULL) {
		return (DCOPY_FAILURE);
	}
	channel->ch_kstat->ks_data = stats;

	kstat_named_init(&stats->cs_bytes_xfer, "bytes_xfer",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_alloc, "cmd_alloc",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_post, "cmd_post",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_poll, "cmd_poll",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_notify_poll, "notify_poll",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_notify_pending, "notify_pending",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_id, "id",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_capabilities, "capabilities",
	    KSTAT_DATA_UINT64);

	kstat_install(channel->ch_kstat);

	channel->ch_stat.cs_id.value.ui64 = channel->ch_info.qc_id;
	channel->ch_stat.cs_capabilities.value.ui64 =
	    channel->ch_info.qc_capabilities;

	return (DCOPY_SUCCESS);
}


/*
 * dcopy_stats_fini()
 */
static void
dcopy_stats_fini(dcopy_handle_t channel)
{
	kstat_delete(channel->ch_kstat);
}
/* *** END OF DEVICE INTERFACE *** */
