/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Kernel framework functions for the fcode interpreter
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/esunddi.h>
#include <sys/ksynch.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/fcode.h>
#ifdef DEBUG
int fcode_debug = 1;
#else
int fcode_debug = 0;
#endif

static kmutex_t fc_request_lock;
static kmutex_t fc_resource_lock;
static kmutex_t fc_hash_lock;
static kmutex_t fc_device_tree_lock;
static kmutex_t fc_phandle_lock;
static kcondvar_t fc_request_cv;
static struct fc_request *fc_request_head;
static int fc_initialized;

static void fcode_timer(void *);

int fcode_timeout = 300;	/* seconds */

int fcodem_unloadable;

extern int hz;

/*
 * Initialize the fcode interpreter framework ... must be called
 * prior to activating any other part of the fcode interpreter
 * framework, including the driver.
 */
static void
fcode_init(void)
{
	if (fc_initialized)
		return;

	mutex_init(&fc_request_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_resource_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_hash_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_device_tree_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_phandle_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&fc_request_cv, NULL, CV_DRIVER, NULL);
	++fc_initialized;
}

static void
fcode_fini(void)
{
	mutex_destroy(&fc_request_lock);
	mutex_destroy(&fc_resource_lock);
	mutex_destroy(&fc_hash_lock);
	mutex_destroy(&fc_device_tree_lock);
	mutex_destroy(&fc_phandle_lock);
	cv_destroy(&fc_request_cv);
	fc_initialized = 0;
}

/*
 * Module linkage information for the kernel.
 */
static struct modlmisc modlmisc = {
	&mod_miscops, "FCode framework 1.13"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

int
_init(void)
{
	int error;

	fcode_init();
	if ((error = mod_install(&modlinkage)) != 0)
		fcode_fini();
	return (error);
}

int
_fini(void)
{
	int error = EBUSY;

	if (fcodem_unloadable)
		if ((error = mod_remove(&modlinkage)) == 0)
			fcode_fini();

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Framework function to invoke the interpreter. Wait and return when the
 * interpreter is done. See fcode.h for details.
 */
int
fcode_interpreter(dev_info_t *ap, fc_ops_t *ops, fco_handle_t handle)
{
	struct fc_request *fp, *qp;
	int error;

	ASSERT(fc_initialized);
	ASSERT(ap);
	ASSERT(ops);
	ASSERT(handle);

	/*
	 * Create a request structure
	 */
	fp = kmem_zalloc(sizeof (struct fc_request), KM_SLEEP);

	fp->next = NULL;
	fp->busy = FC_R_INIT;
	fp->error = FC_SUCCESS;
	fp->ap_dip = ap;
	fp->ap_ops = ops;
	fp->handle = handle;

	/*
	 * Add the request to the end of the request list.
	 */
	mutex_enter(&fc_request_lock);

	if (fc_request_head == NULL)
		fc_request_head = fp;
	else {
		for (qp = fc_request_head; qp->next != NULL; qp = qp->next)
			/* empty */;
		qp->next = fp;
	}
	mutex_exit(&fc_request_lock);

	/*
	 * Log a message (i.e., i_ddi_log_event) indicating that a request
	 * has been queued to start the userland fcode interpreter.
	 * This call is the glue to the eventd and automates the process.
	 */

	/*
	 * Signal the driver if it's waiting for a request to be queued.
	 */
	cv_broadcast(&fc_request_cv);

	/*
	 * Wait for the request to be serviced
	 */
	mutex_enter(&fc_request_lock);
	fp->timeout = timeout(fcode_timer, fp, hz * fcode_timeout);
	while (fp->busy != FC_R_DONE)
		cv_wait(&fc_request_cv, &fc_request_lock);

	if (fp->timeout) {
		(void) untimeout(fp->timeout);
		fp->timeout = NULL;
	}

	/*
	 * Remove the request from the queue (while still holding the lock)
	 */
	if (fc_request_head == fp)
		fc_request_head = fp->next;
	else {
		for (qp = fc_request_head; qp->next != fp; qp = qp->next)
			/* empty */;
		qp->next = fp->next;
	}
	mutex_exit(&fc_request_lock);

	FC_DEBUG1(2, CE_CONT, "fcode_interpreter: request finished, fp %p\n",
	    fp);

	/*
	 * Free the request structure and return any errors.
	 */
	error = fp->error;
	kmem_free(fp, sizeof (struct fc_request));
	return (error);
}
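
/*
 * Illustrative sketch (not compiled): how an attachment-point driver
 * might call into the framework. The surrounding setup of 'ops' and
 * 'handle' is hypothetical and omitted here; the real allocation
 * interfaces are declared in fcode.h.
 */
#ifdef notdef
static int
fc_example_run_fcode(dev_info_t *ap, fc_ops_t *ops, fco_handle_t handle)
{
	int error;

	/*
	 * Blocks until the userland interpreter daemon services the
	 * request, or until fcode_timeout seconds elapse.
	 */
	error = fcode_interpreter(ap, ops, handle);
	if (error == FC_TIMEOUT)
		cmn_err(CE_WARN, "fcode request timed out\n");
	return (error);
}
#endif /* notdef */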

/*
 * Timeout requests that don't get picked up by the interpreter. This
 * would happen if the daemon is not running. If the timer goes off
 * and the request's state is not FC_R_INIT, then the interpreter has
 * picked up the request.
 */
static void
fcode_timer(void *arg)
{
	struct fc_request *fp = arg;

	mutex_enter(&fc_request_lock);
	fp->timeout = 0;
	if (fp->busy == FC_R_INIT) {
		cmn_err(CE_WARN, "fcode_timer: Timeout waiting for "
		    "interpreter - Interpreter did not pick up request\n");
		fp->busy = FC_R_DONE;
		fp->error = FC_TIMEOUT;
		mutex_exit(&fc_request_lock);
		cv_broadcast(&fc_request_cv);
		return;
	} else if (fp->error != FC_SUCCESS) {
		/*
		 * An error was detected, but didn't close the driver.
		 * This will allow the process to error out, returning
		 * the interpreter error code instead of FC_TIMEOUT.
		 */
		fp->busy = FC_R_DONE;
		cv_broadcast(&fc_request_cv);
		mutex_exit(&fc_request_lock);
		return;
	} else {
		cmn_err(CE_WARN, "fcode_timer: Timeout waiting for "
		    "interpreter - Interpreter is executing request\n");
	}
	mutex_exit(&fc_request_lock);
}

/*
 * This is the function the driver calls to wait for and get
 * a request. The call should be interruptible since it's done
 * at read(2) time, so allow for signals to interrupt us.
 *
 * Return NULL if the wait was interrupted, else return a pointer
 * to the fc_request structure (marked as busy).
 *
 * Note that we have to check for a request first, before waiting,
 * in case the request is already queued. In this case, the signal
 * may have already been delivered.
 */
struct fc_request *
fc_get_request(void)
{
	struct fc_request *fp;

	ASSERT(fc_initialized);

	mutex_enter(&fc_request_lock);

	/*CONSTANTCONDITION*/
	while (1) {
		for (fp = fc_request_head; fp != NULL; fp = fp->next) {
			if (fp->busy == FC_R_INIT) {
				fp->busy = FC_R_BUSY;
				mutex_exit(&fc_request_lock);
				return (fp);
			}
		}
		if (cv_wait_sig(&fc_request_cv, &fc_request_lock) == 0) {
			mutex_exit(&fc_request_lock);
			return (NULL);
		}
	}
	/*NOTREACHED*/
}

/*
 * This is the function the driver calls when it's finished with
 * a request. Mark the request as done and signal the thread that
 * enqueued the request.
 */
void
fc_finish_request(struct fc_request *fp)
{
	ASSERT(fc_initialized);
	ASSERT(fp);
	ASSERT(fp->busy == FC_R_BUSY);

	mutex_enter(&fc_request_lock);
	fp->busy = FC_R_DONE;
	mutex_exit(&fc_request_lock);

	cv_broadcast(&fc_request_cv);
}
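
/*
 * Illustrative sketch (not compiled): the shape of a service loop
 * built on fc_get_request()/fc_finish_request(). The helper
 * fc_example_service() is hypothetical; in practice the fcode driver
 * hands each request to the userland interpreter instead.
 */
#ifdef notdef
static void
fc_example_service_loop(void)
{
	struct fc_request *fp;

	/*
	 * fc_get_request() returns NULL if the wait was interrupted
	 * by a signal; otherwise the request comes back marked
	 * FC_R_BUSY.
	 */
	while ((fp = fc_get_request()) != NULL) {
		fc_example_service(fp);		/* hypothetical helper */
		fp->error = FC_SUCCESS;
		fc_finish_request(fp);	/* FC_R_DONE, wakes the waiter */
	}
}
#endif /* notdef */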

/*
 * Generic resource list management subroutines
 */
void
fc_add_resource(fco_handle_t rp, struct fc_resource *ip)
{
	ASSERT(rp);
	ASSERT(ip);

	mutex_enter(&fc_resource_lock);
	ip->next = rp->head;
	rp->head = ip;
	mutex_exit(&fc_resource_lock);
}

void
fc_rem_resource(fco_handle_t rp, struct fc_resource *ip)
{
	struct fc_resource *fp;

	ASSERT(rp);
	ASSERT(ip);

	if (rp->head == NULL) {
		cmn_err(CE_CONT, "fc_rem_resource: NULL list head!\n");
		return;
	}

	mutex_enter(&fc_resource_lock);
	if (rp->head == ip) {
		rp->head = ip->next;
		mutex_exit(&fc_resource_lock);
		return;
	}

	for (fp = rp->head; fp && (fp->next != ip); fp = fp->next)
		/* empty */;

	if (fp == NULL) {
		mutex_exit(&fc_resource_lock);
		cmn_err(CE_CONT, "fc_rem_resource: Item not on list!\n");
		return;
	}

	fp->next = ip->next;
	mutex_exit(&fc_resource_lock);
}

/*ARGSUSED*/
void
fc_lock_resource_list(fco_handle_t rp)
{
	mutex_enter(&fc_resource_lock);
}

/*ARGSUSED*/
void
fc_unlock_resource_list(fco_handle_t rp)
{
	mutex_exit(&fc_resource_lock);
}
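
/*
 * Illustrative sketch (not compiled): tracking an allocation on a
 * handle's resource list. The resource type and field names used
 * below are hypothetical; see fcode.h for the actual fc_resource
 * layout.
 */
#ifdef notdef
static void
fc_example_track_buffer(fco_handle_t rp, void *buf, size_t len)
{
	struct fc_resource *ip;

	ip = kmem_zalloc(sizeof (struct fc_resource), KM_SLEEP);
	ip->type = RT_EXAMPLE;		/* hypothetical resource type */
	ip->virt = buf;			/* hypothetical fields */
	ip->len = len;
	fc_add_resource(rp, ip);	/* prepends under fc_resource_lock */
}
#endif /* notdef */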

/*
 * Common helper ops and subroutines
 */
/*ARGSUSED*/
int
fc_syntax_error(fc_ci_t *cp, char *msg)
{
	cp->error = fc_int2cell(-1);
	cp->nresults = fc_int2cell(0);
	return (0);
}

/*ARGSUSED*/
int
fc_priv_error(fc_ci_t *cp, char *msg)
{
	cp->priv_error = fc_int2cell(-1);
	cp->error = fc_int2cell(0);
	cp->nresults = fc_int2cell(0);
	return (0);
}

/*ARGSUSED*/
int
fc_success_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
{
	cp->priv_error = cp->error = fc_int2cell(0);
	return (0);
}

/*
 * fc_fail_op: This 'handles' a request by specifically failing it,
 * as opposed to not handling it and returning '-1' to indicate
 * 'service unknown' and allowing somebody else in the chain to
 * handle it.
 */
/*ARGSUSED*/
int
fc_fail_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
{
	cmn_err(CE_CONT, "fcode ops: fail service name <%s>\n",
	    (char *)fc_cell2ptr(cp->svc_name));

	cp->nresults = fc_int2cell(0);
	cp->error = fc_int2cell(-1);
	return (0);
}

/*
 * Functions to manage the set of handles we give to the interpreter.
 * The handles are opaque and internally represent dev_info_t pointers.
 */
struct fc_phandle_entry **
fc_handle_to_phandle_head(fco_handle_t rp)
{
	while (rp->next_handle)
		rp = rp->next_handle;

	return (&rp->ptable);
}

/*ARGSUSED*/
void
fc_phandle_table_alloc(struct fc_phandle_entry **head)
{
}

void
fc_phandle_table_free(struct fc_phandle_entry **head)
{
	struct fc_phandle_entry *ip, *np;

	/*
	 * Free each entry in the table.
	 */
	for (ip = *head; ip; ip = np) {
		np = ip->next;
		kmem_free(ip, sizeof (struct fc_phandle_entry));
	}
	*head = NULL;
}

dev_info_t *
fc_phandle_to_dip(struct fc_phandle_entry **head, fc_phandle_t handle)
{
	struct fc_phandle_entry *ip;

	mutex_enter(&fc_hash_lock);

	for (ip = *head; ip; ip = ip->next)
		if (ip->h == handle)
			break;

	mutex_exit(&fc_hash_lock);

	return (ip ? ip->dip : NULL);
}

fc_phandle_t
fc_dip_to_phandle(struct fc_phandle_entry **head, dev_info_t *dip)
{
	struct fc_phandle_entry *hp, *np;
	fc_phandle_t h;

	ASSERT(dip);
	h = (fc_phandle_t)ddi_get_nodeid(dip);

	/*
	 * Just in case, allocate a new entry ...
	 */
	np = kmem_zalloc(sizeof (struct fc_phandle_entry), KM_SLEEP);

	mutex_enter(&fc_hash_lock);

	/*
	 * If we already have this dip in the table, just return the handle
	 */
	for (hp = *head; hp; hp = hp->next) {
		if (hp->dip == dip) {
			mutex_exit(&fc_hash_lock);
			kmem_free(np, sizeof (struct fc_phandle_entry));
			return (h);
		}
	}

	/*
	 * Insert this entry into the list of known entries
	 */
	np->next = *head;
	np->dip = dip;
	np->h = h;
	*head = np;
	mutex_exit(&fc_hash_lock);
	return (h);
}
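
/*
 * Illustrative sketch (not compiled): round-tripping a dip through the
 * phandle table. fc_dip_to_phandle() inserts the mapping as a side
 * effect, so the subsequent lookup must find the same dip.
 */
#ifdef notdef
static void
fc_example_phandle_roundtrip(fco_handle_t rp, dev_info_t *dip)
{
	struct fc_phandle_entry **head;
	fc_phandle_t h;

	head = fc_handle_to_phandle_head(rp);
	h = fc_dip_to_phandle(head, dip);
	ASSERT(fc_phandle_to_dip(head, h) == dip);
}
#endif /* notdef */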

/*
 * We won't need this function once the ddi is modified to handle
 * unique non-prom nodeids. For now, this allows us to add a given
 * nodeid to the device tree without dereferencing the value in the
 * devinfo node, so we have a parallel mechanism.
 */
void
fc_add_dip_to_phandle(struct fc_phandle_entry **head, dev_info_t *dip,
    fc_phandle_t h)
{
	struct fc_phandle_entry *hp, *np;

	ASSERT(dip);

	/*
	 * Just in case, allocate a new entry ...
	 */
	np = kmem_zalloc(sizeof (struct fc_phandle_entry), KM_SLEEP);

	mutex_enter(&fc_hash_lock);

	/*
	 * If we already have this dip in the table, there's nothing
	 * more to do.
	 */
	for (hp = *head; hp; hp = hp->next) {
		if (hp->dip == dip) {
			mutex_exit(&fc_hash_lock);
			kmem_free(np, sizeof (struct fc_phandle_entry));
			return;
		}
	}

	/*
	 * Insert this entry into the list of known entries
	 */
	np->next = *head;
	np->dip = dip;
	np->h = h;
	*head = np;
	mutex_exit(&fc_hash_lock);
}

/*
 * Functions to manage our copy of our subtree.
 *
 * The head of the device tree is always stored in the last 'handle'
 * in the handle chain.
 */
struct fc_device_tree **
fc_handle_to_dtree_head(fco_handle_t rp)
{
	while (rp->next_handle)
		rp = rp->next_handle;

	return (&rp->dtree);
}

struct fc_device_tree *
fc_handle_to_dtree(fco_handle_t rp)
{
	struct fc_device_tree **head = fc_handle_to_dtree_head(rp);

	return (*head);
}

/*
 * The root of the subtree is the attachment point ...
 * Thus, there is never an empty device tree.
 */
void
fc_create_device_tree(dev_info_t *ap, struct fc_device_tree **head)
{
	struct fc_device_tree *dp;

	dp = kmem_zalloc(sizeof (struct fc_device_tree), KM_SLEEP);
	dp->dip = ap;
	*head = dp;
}
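
/*
 * Illustrative sketch (not compiled): building the shadow subtree for
 * an attachment point, recording one new child under it, then tearing
 * the whole tree down.
 */
#ifdef notdef
static void
fc_example_shadow_tree(fco_handle_t rp, dev_info_t *ap, dev_info_t *child)
{
	struct fc_device_tree **head;

	head = fc_handle_to_dtree_head(rp);
	fc_create_device_tree(ap, head);	/* root is the ap itself */
	fc_add_child(child, ap, *head);
	/* ... */
	fc_remove_device_tree(head);
}
#endif /* notdef */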

#ifdef notdef
static void
fc_remove_subtree(struct fc_device_tree *dp)
{
	struct fc_device_tree *np;

	if (dp->child) {
		fc_remove_subtree(dp->child);
		dp->child = NULL;
	}

	/*
	 * Remove each peer node, working our way backwards from the
	 * last peer node to the first peer node.
	 */
	if (dp->peer != NULL) {
		for (np = dp->peer; np->peer; np = dp->peer) {
			/*
			 * Walk to the second-to-last node, so that
			 * np->peer is the last peer on the list.
			 */
			for (/* empty */; np->peer->peer; np = np->peer)
				/* empty */;
			fc_remove_subtree(np->peer);
			np->peer = NULL;
		}
		fc_remove_subtree(dp->peer);
		dp->peer = NULL;
	}

	ASSERT((dp->child == NULL) && (dp->peer == NULL));
	kmem_free(dp, sizeof (struct fc_device_tree));
}

void
fc_remove_device_tree(struct fc_device_tree **head)
{
	ASSERT(head && (*head != NULL));

	fc_remove_subtree(*head);
	*head = NULL;
}
#endif /* notdef */

void
fc_remove_device_tree(struct fc_device_tree **head)
{
	struct fc_device_tree *dp;

	ASSERT(head && (*head != NULL));

	dp = *head;

	if (dp->child)
		fc_remove_device_tree(&dp->child);

	if (dp->peer)
		fc_remove_device_tree(&dp->peer);

	ASSERT((dp->child == NULL) && (dp->peer == NULL));

	kmem_free(dp, sizeof (struct fc_device_tree));
	*head = NULL;
}

struct fc_device_tree *
fc_find_node(dev_info_t *dip, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;

	while (hp) {
		if (hp->dip == dip)
			return (hp);

		if (hp->child)
			if ((p = fc_find_node(dip, hp->child)) != NULL)
				return (p);

		hp = hp->peer;
	}
	return (NULL);
}

void
fc_add_child(dev_info_t *child, dev_info_t *parent, struct fc_device_tree *hp)
{
	struct fc_device_tree *p, *q;

	q = kmem_zalloc(sizeof (struct fc_device_tree), KM_SLEEP);
	q->dip = child;

	mutex_enter(&fc_device_tree_lock);

#ifdef DEBUG
	/* XXX: Revisit ASSERT vs PANIC */
	p = fc_find_node(child, hp);
	ASSERT(p == NULL);
#endif

	p = fc_find_node(parent, hp);
	ASSERT(p != NULL);

	q->peer = p->child;
	p->child = q;

	mutex_exit(&fc_device_tree_lock);
}

void
fc_remove_child(dev_info_t *child, struct fc_device_tree *head)
{
	struct fc_device_tree *p, *c, *n;
	dev_info_t *parent = ddi_get_parent(child);

	mutex_enter(&fc_device_tree_lock);

	p = fc_find_node(parent, head);
	ASSERT(p != NULL);

	/*
	 * Find the child within the parent's subtree ...
	 */
	c = fc_find_node(child, p);
	ASSERT(c != NULL);
	ASSERT(c->child == NULL);

	/*
	 * If it's the first child, remove it, otherwise
	 * remove it from the child's peer list.
	 */
	if (p->child == c) {
		p->child = c->peer;
	} else {
		int found = 0;
		for (n = p->child; n->peer; n = n->peer) {
			if (n->peer == c) {
				n->peer = c->peer;
				found = 1;
				break;
			}
		}
		if (!found)
			cmn_err(CE_PANIC, "fc_remove_child: not found\n");
	}
	mutex_exit(&fc_device_tree_lock);

	kmem_free(c, sizeof (struct fc_device_tree));
}

dev_info_t *
fc_child_node(dev_info_t *parent, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;
	dev_info_t *dip = NULL;

	mutex_enter(&fc_device_tree_lock);
	p = fc_find_node(parent, hp);
	if (p && p->child)
		dip = p->child->dip;
	mutex_exit(&fc_device_tree_lock);

	return (dip);
}

dev_info_t *
fc_peer_node(dev_info_t *devi, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;
	dev_info_t *dip = NULL;

	mutex_enter(&fc_device_tree_lock);
	p = fc_find_node(devi, hp);
	if (p && p->peer)
		dip = p->peer->dip;
	mutex_exit(&fc_device_tree_lock);

	return (dip);
}
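
/*
 * Illustrative sketch (not compiled): a depth-first walk of the shadow
 * tree using only the accessors above. Each accessor takes and drops
 * fc_device_tree_lock internally, so the walk as a whole is not atomic
 * with respect to concurrent updates.
 */
#ifdef notdef
static void
fc_example_walk(dev_info_t *dip, struct fc_device_tree *hp)
{
	dev_info_t *d;

	for (d = fc_child_node(dip, hp); d != NULL;
	    d = fc_peer_node(d, hp))
		fc_example_walk(d, hp);
}
#endif /* notdef */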