/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2000-2001 by Sun Microsystems, Inc.
 * All rights reserved.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Kernel framework functions for the fcode interpreter
 */

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/debug.h>
#include <sys/kmem.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/esunddi.h>
#include <sys/ksynch.h>
#include <sys/modctl.h>
#include <sys/errno.h>
#include <sys/fcode.h>

int fcode_debug = 0;

static kmutex_t fc_request_lock;
static kmutex_t fc_resource_lock;
static kmutex_t fc_hash_lock;
static kmutex_t fc_device_tree_lock;
static kmutex_t fc_phandle_lock;
static kcondvar_t fc_request_cv;
static struct fc_request *fc_request_head;
static int fc_initialized;

static void fcode_timer(void *);

int fcode_timeout = 300;	/* seconds */

int fcodem_unloadable;

extern int hz;

/*
 * Initialize the fcode interpreter framework ... must be called
 * prior to using any of the fcode interpreter framework, including
 * the driver.
 */
static void
fcode_init(void)
{
	if (fc_initialized)
		return;

	mutex_init(&fc_request_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_resource_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_hash_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_device_tree_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&fc_phandle_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&fc_request_cv, NULL, CV_DRIVER, NULL);
	++fc_initialized;
}

static void
fcode_fini(void)
{
	mutex_destroy(&fc_request_lock);
	mutex_destroy(&fc_resource_lock);
	mutex_destroy(&fc_hash_lock);
	mutex_destroy(&fc_device_tree_lock);
	mutex_destroy(&fc_phandle_lock);
	cv_destroy(&fc_request_cv);
	fc_initialized = 0;
}

/*
 * Module linkage information for the kernel.
 */
static struct modlmisc modlmisc = {
	&mod_miscops, "FCode framework 1.13"
};

static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modlmisc, NULL
};

int
_init(void)
{
	int error;

	fcode_init();
	if ((error = mod_install(&modlinkage)) != 0)
		fcode_fini();
	return (error);
}

int
_fini(void)
{
	int error = EBUSY;

	if (fcodem_unloadable)
		if ((error = mod_remove(&modlinkage)) == 0)
			fcode_fini();

	return (error);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * Framework function to invoke the interpreter. Wait and return when the
 * interpreter is done. See fcode.h for details.
 */
int
fcode_interpreter(dev_info_t *ap, fc_ops_t *ops, fco_handle_t handle)
{
	struct fc_request *fp, *qp;
	int error;

	ASSERT(fc_initialized);
	ASSERT(ap);
	ASSERT(ops);
	ASSERT(handle);

	/*
	 * Create a request structure
	 */
	fp = kmem_zalloc(sizeof (struct fc_request), KM_SLEEP);

	fp->next = NULL;
	fp->busy = FC_R_INIT;
	fp->error = FC_SUCCESS;
	fp->ap_dip = ap;
	fp->ap_ops = ops;
	fp->handle = handle;

	/*
	 * Add the request to the end of the request list.
	 */
	mutex_enter(&fc_request_lock);

	if (fc_request_head == NULL)
		fc_request_head = fp;
	else {
		for (qp = fc_request_head; qp->next != NULL; qp = qp->next)
			/* empty */;
		qp->next = fp;
	}
	mutex_exit(&fc_request_lock);

	/*
	 * Log a message (i.e., i_ddi_log_event) indicating that a request
	 * has been queued to start the userland fcode interpreter.
	 * This call is the glue to the eventd and automates the process.
	 */

	/*
	 * Signal the driver if it's waiting for a request to be queued.
	 */
	cv_broadcast(&fc_request_cv);

	/*
	 * Wait for the request to be serviced
	 */
	mutex_enter(&fc_request_lock);
	fp->timeout = timeout(fcode_timer, fp, hz * fcode_timeout);
	while (fp->busy != FC_R_DONE)
		cv_wait(&fc_request_cv, &fc_request_lock);

	if (fp->timeout) {
		(void) untimeout(fp->timeout);
		fp->timeout = NULL;
	}

	/*
	 * Remove the request from the queue (while still holding the lock)
	 */
	if (fc_request_head == fp)
		fc_request_head = fp->next;
	else {
		for (qp = fc_request_head; qp->next != fp; qp = qp->next)
			/* empty */;
		qp->next = fp->next;
	}
	mutex_exit(&fc_request_lock);

	FC_DEBUG1(2, CE_CONT, "fcode_interpreter: request finished, fp %p\n",
	    fp);

	/*
	 * Free the request structure and return any errors.
	 */
	error = fp->error;
	kmem_free(fp, sizeof (struct fc_request));
	return (error);
}
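
/*
 * Illustrative sketch (kept out of the build, like the notdef blocks
 * below): roughly how an attachment-point driver drives the framework.
 * The ops/handle construction here is a hypothetical placeholder; real
 * callers build both via their bus nexus support code (see fcode.h).
 */
#ifdef	notdef
static int
example_invoke_interpreter(dev_info_t *ap)
{
	fc_ops_t *ops = example_alloc_ops(ap);		/* hypothetical */
	fco_handle_t handle = example_alloc_handle(ap);	/* hypothetical */
	int error;

	/*
	 * Queues the request, wakes the daemon, and blocks until the
	 * request is marked FC_R_DONE or the fcode_timeout timer fires.
	 */
	error = fcode_interpreter(ap, ops, handle);
	if (error == FC_TIMEOUT)
		cmn_err(CE_WARN, "example: fcode daemon never picked up "
		    "the request");
	return (error);
}
#endif	/* notdef */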

/*
 * Time out requests that don't get picked up by the interpreter.  This
 * would happen if the daemon is not running.  If the timer goes off
 * and the request's state is not FC_R_INIT, then the interpreter has
 * picked up the request.
 */
static void
fcode_timer(void *arg)
{
	struct fc_request *fp = arg;

	mutex_enter(&fc_request_lock);
	fp->timeout = 0;
	if (fp->busy == FC_R_INIT) {
		cmn_err(CE_WARN, "fcode_timer: Timeout waiting for "
		    "interpreter - Interpreter did not pick up request\n");
		fp->busy = FC_R_DONE;
		fp->error = FC_TIMEOUT;
		mutex_exit(&fc_request_lock);
		cv_broadcast(&fc_request_cv);
		return;
	} else {
		cmn_err(CE_WARN, "fcode_timer: Timeout waiting for "
		    "interpreter - Interpreter is executing request\n");
	}
	mutex_exit(&fc_request_lock);
}

/*
 * This is the function the driver calls to wait for and get
 * a request.  The call should be interruptible since it's done
 * at read(2) time, so allow for signals to interrupt us.
 *
 * Return NULL if the wait was interrupted, else return a pointer
 * to the fc_request structure (marked as busy).
 *
 * Note that we have to check for a request first, before waiting,
 * in case the request is already queued. In this case, the signal
 * may have already been delivered.
 */
struct fc_request *
fc_get_request(void)
{
	struct fc_request *fp;

	ASSERT(fc_initialized);

	mutex_enter(&fc_request_lock);

	/*CONSTANTCONDITION*/
	while (1) {
		for (fp = fc_request_head; fp != NULL; fp = fp->next) {
			if (fp->busy == FC_R_INIT) {
				fp->busy = FC_R_BUSY;
				mutex_exit(&fc_request_lock);
				return (fp);
			}
		}
		if (cv_wait_sig(&fc_request_cv, &fc_request_lock) == 0) {
			mutex_exit(&fc_request_lock);
			return (NULL);
		}
	}
	/*NOTREACHED*/
}

/*
 * This is the function the driver calls when it's finished with
 * a request.  Mark the request as done and signal the thread that
 * enqueued the request.
 */
void
fc_finish_request(struct fc_request *fp)
{
	ASSERT(fc_initialized);
	ASSERT(fp);
	ASSERT(fp->busy == FC_R_BUSY);

	mutex_enter(&fc_request_lock);
	fp->busy = FC_R_DONE;
	mutex_exit(&fc_request_lock);

	cv_broadcast(&fc_request_cv);
}
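
/*
 * Illustrative sketch (kept out of the build): the pairing the fcode
 * driver follows at read(2) time.  fc_get_request() blocks until a
 * request is queued (or a signal arrives) and marks it FC_R_BUSY; once
 * the daemon has interpreted the fcode, fc_finish_request() marks it
 * FC_R_DONE and wakes the thread sleeping in fcode_interpreter().
 */
#ifdef	notdef
static int
example_service_one_request(void)
{
	struct fc_request *fp;

	if ((fp = fc_get_request()) == NULL)
		return (EINTR);		/* wait interrupted by a signal */

	/* ... hand the request to the userland interpreter here ... */

	fc_finish_request(fp);
	return (0);
}
#endif	/* notdef */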

/*
 * Generic resource list management subroutines
 */
void
fc_add_resource(fco_handle_t rp, struct fc_resource *ip)
{
	ASSERT(rp);
	ASSERT(ip);

	mutex_enter(&fc_resource_lock);
	ip->next = rp->head;	/* push onto the front of the list */
	rp->head = ip;
	mutex_exit(&fc_resource_lock);
}

void
fc_rem_resource(fco_handle_t rp, struct fc_resource *ip)
{
	struct fc_resource *fp;

	ASSERT(rp);
	ASSERT(ip);

	mutex_enter(&fc_resource_lock);

	if (rp->head == NULL) {
		mutex_exit(&fc_resource_lock);
		cmn_err(CE_CONT, "fc_rem_resource: NULL list head!\n");
		return;
	}

	if (rp->head == ip) {
		rp->head = ip->next;
		mutex_exit(&fc_resource_lock);
		return;
	}

	for (fp = rp->head; fp && (fp->next != ip); fp = fp->next)
		/* empty */;

	if (fp == NULL) {
		mutex_exit(&fc_resource_lock);
		cmn_err(CE_CONT, "fc_rem_resource: Item not on list!\n");
		return;
	}

	fp->next = ip->next;
	mutex_exit(&fc_resource_lock);
}

/*ARGSUSED*/
void
fc_lock_resource_list(fco_handle_t rp)
{
	mutex_enter(&fc_resource_lock);
}

/*ARGSUSED*/
void
fc_unlock_resource_list(fco_handle_t rp)
{
	mutex_exit(&fc_resource_lock);
}
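
/*
 * Illustrative sketch (kept out of the build): a consumer walking a
 * handle's resource list.  Note that fc_resource_lock covers only the
 * list linkage, so the walk must not sleep while holding it.
 */
#ifdef	notdef
static void
example_walk_resources(fco_handle_t rp)
{
	struct fc_resource *ip;

	fc_lock_resource_list(rp);
	for (ip = rp->head; ip != NULL; ip = ip->next) {
		/* ... examine ip without sleeping ... */
	}
	fc_unlock_resource_list(rp);
}
#endif	/* notdef */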

/*
 * Common helper ops and subroutines
 */
/*ARGSUSED*/
int
fc_syntax_error(fc_ci_t *cp, char *msg)
{
	cp->error = fc_int2cell(-1);
	cp->nresults = fc_int2cell(0);
	return (0);
}

/*ARGSUSED*/
int
fc_priv_error(fc_ci_t *cp, char *msg)
{
	cp->priv_error = fc_int2cell(-1);
	cp->error = fc_int2cell(0);
	cp->nresults = fc_int2cell(0);
	return (0);
}

/*ARGSUSED*/
int
fc_success_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
{
	cp->priv_error = cp->error = fc_int2cell(0);
	return (0);
}

/*
 * fc_fail_op: This 'handles' a request by specifically failing it,
 * as opposed to not handling it and returning '-1' to indicate
 * 'service unknown' and allowing somebody else in the chain to
 * handle it.
 */
/*ARGSUSED*/
int
fc_fail_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
{
	cmn_err(CE_CONT, "fcode ops: fail service name <%s>\n",
	    (char *)fc_cell2ptr(cp->svc_name));

	cp->nresults = fc_int2cell(0);
	cp->error = fc_int2cell(-1);
	return (0);
}
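
/*
 * Illustrative sketch (kept out of the build): how the 'service
 * unknown' convention interacts with fc_fail_op in a chained ops
 * routine.  The service names here are hypothetical.
 */
#ifdef	notdef
static int
example_bus_fc_op(dev_info_t *ap, fco_handle_t handle, fc_ci_t *cp)
{
	char *svc = (char *)fc_cell2ptr(cp->svc_name);

	if (strcmp(svc, "example-known-service") == 0)
		return (fc_success_op(ap, handle, cp));

	if (strcmp(svc, "example-forbidden-service") == 0)
		return (fc_fail_op(ap, handle, cp));	/* handled: fails */

	return (-1);	/* unknown: let the next ops in the chain try */
}
#endif	/* notdef */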

/*
 * Functions to manage the set of handles we give to the interpreter.
 * The handles are opaque and internally represent dev_info_t pointers.
 */
struct fc_phandle_entry **
fc_handle_to_phandle_head(fco_handle_t rp)
{
	while (rp->next_handle)
		rp = rp->next_handle;

	return (&rp->ptable);
}

/*ARGSUSED*/
void
fc_phandle_table_alloc(struct fc_phandle_entry **head)
{
	/* Nothing to do here; entries are allocated as dips are mapped. */
}

void
fc_phandle_table_free(struct fc_phandle_entry **head)
{
	struct fc_phandle_entry *ip, *np;

	/*
	 * Free each entry in the table.
	 */
	for (ip = *head; ip; ip = np) {
		np = ip->next;
		kmem_free(ip, sizeof (struct fc_phandle_entry));
	}
	*head = NULL;
}

dev_info_t *
fc_phandle_to_dip(struct fc_phandle_entry **head, fc_phandle_t handle)
{
	struct fc_phandle_entry *ip;

	mutex_enter(&fc_hash_lock);

	for (ip = *head; ip; ip = ip->next)
		if (ip->h == handle)
			break;

	mutex_exit(&fc_hash_lock);

	return (ip ? ip->dip : NULL);
}

fc_phandle_t
fc_dip_to_phandle(struct fc_phandle_entry **head, dev_info_t *dip)
{
	struct fc_phandle_entry *hp, *np;
	fc_phandle_t h;

	ASSERT(dip);
	h = (fc_phandle_t)ddi_get_nodeid(dip);

	/*
	 * Just in case, allocate a new entry ...
	 */
	np = kmem_zalloc(sizeof (struct fc_phandle_entry), KM_SLEEP);

	mutex_enter(&fc_hash_lock);

	/*
	 * If we already have this dip in the table, just return the handle
	 */
	for (hp = *head; hp; hp = hp->next) {
		if (hp->dip == dip) {
			mutex_exit(&fc_hash_lock);
			kmem_free(np, sizeof (struct fc_phandle_entry));
			return (h);
		}
	}

	/*
	 * Insert this entry into the list of known entries
	 */
	np->next = *head;
	np->dip = dip;
	np->h = h;
	*head = np;
	mutex_exit(&fc_hash_lock);
	return (h);
}
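
/*
 * Illustrative sketch (kept out of the build): the phandle round trip.
 * Mapping the same dip twice yields the same handle without growing
 * the table, and the reverse lookup recovers the original dip.
 */
#ifdef	notdef
static void
example_phandle_round_trip(fco_handle_t rp, dev_info_t *dip)
{
	struct fc_phandle_entry **head = fc_handle_to_phandle_head(rp);
	fc_phandle_t h;

	h = fc_dip_to_phandle(head, dip);
	ASSERT(fc_dip_to_phandle(head, dip) == h);	/* idempotent */
	ASSERT(fc_phandle_to_dip(head, h) == dip);
}
#endif	/* notdef */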

/*
 * We won't need this function once the ddi is modified to handle
 * unique non-prom nodeids.  For now, this allows us to add a given
 * nodeid to the device tree without dereferencing the value in the
 * devinfo node, so we have a parallel mechanism.
 */
void
fc_add_dip_to_phandle(struct fc_phandle_entry **head, dev_info_t *dip,
    fc_phandle_t h)
{
	struct fc_phandle_entry *hp, *np;

	ASSERT(dip);

	/*
	 * Just in case, allocate a new entry ...
	 */
	np = kmem_zalloc(sizeof (struct fc_phandle_entry), KM_SLEEP);

	mutex_enter(&fc_hash_lock);

	/*
	 * If we already have this dip in the table, there's nothing to do
	 */
	for (hp = *head; hp; hp = hp->next) {
		if (hp->dip == dip) {
			mutex_exit(&fc_hash_lock);
			kmem_free(np, sizeof (struct fc_phandle_entry));
			return;
		}
	}

	/*
	 * Insert this entry into the list of known entries
	 */
	np->next = *head;
	np->dip = dip;
	np->h = h;
	*head = np;
	mutex_exit(&fc_hash_lock);
}

/*
 * Functions to manage our copy of our subtree.
 *
 * The head of the device tree is always stored in the last 'handle'
 * in the handle chain.
 */
struct fc_device_tree **
fc_handle_to_dtree_head(fco_handle_t rp)
{
	while (rp->next_handle)
		rp = rp->next_handle;

	return (&rp->dtree);
}

struct fc_device_tree *
fc_handle_to_dtree(fco_handle_t rp)
{
	struct fc_device_tree **head = fc_handle_to_dtree_head(rp);

	return (*head);
}

/*
 * The root of the subtree is the attachment point ...
 * Thus, there is never an empty device tree.
 */
void
fc_create_device_tree(dev_info_t *ap, struct fc_device_tree **head)
{
	struct fc_device_tree *dp;

	dp = kmem_zalloc(sizeof (struct fc_device_tree), KM_SLEEP);
	dp->dip = ap;
	*head = dp;
}

#ifdef	notdef
static void
fc_remove_subtree(struct fc_device_tree *dp)
{
	struct fc_device_tree *np;

	if (dp->child) {
		fc_remove_subtree(dp->child);
		dp->child = NULL;
	}

	/*
	 * Remove each peer node, working our way backwards from the
	 * last peer node to the first peer node.
	 */
	if (dp->peer != NULL) {
		while (dp->peer->peer != NULL) {
			/*
			 * Walk to the second-to-last peer, then remove
			 * and unlink the last one.
			 */
			for (np = dp->peer; np->peer->peer; np = np->peer)
				/* empty */;
			fc_remove_subtree(np->peer);
			np->peer = NULL;
		}
		fc_remove_subtree(dp->peer);
		dp->peer = NULL;
	}

	ASSERT((dp->child == NULL) && (dp->peer == NULL));
	kmem_free(dp, sizeof (struct fc_device_tree));
}

void
fc_remove_device_tree(struct fc_device_tree **head)
{
	ASSERT(head && (*head != NULL));

	fc_remove_subtree(*head);
	*head = NULL;
}
#endif	/* notdef */

void
fc_remove_device_tree(struct fc_device_tree **head)
{
	struct fc_device_tree *dp;

	ASSERT(head && (*head != NULL));

	dp = *head;

	if (dp->child)
		fc_remove_device_tree(&dp->child);

	if (dp->peer)
		fc_remove_device_tree(&dp->peer);

	ASSERT((dp->child == NULL) && (dp->peer == NULL));

	kmem_free(dp, sizeof (struct fc_device_tree));
	*head = NULL;
}

struct fc_device_tree *
fc_find_node(dev_info_t *dip, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;

	while (hp) {
		if (hp->dip == dip)
			return (hp);

		if (hp->child)
			if ((p = fc_find_node(dip, hp->child)) != NULL)
				return (p);

		hp = hp->peer;
	}
	return (NULL);
}

void
fc_add_child(dev_info_t *child, dev_info_t *parent, struct fc_device_tree *hp)
{
	struct fc_device_tree *p, *q;

	q = kmem_zalloc(sizeof (struct fc_device_tree), KM_SLEEP);
	q->dip = child;

	mutex_enter(&fc_device_tree_lock);

#ifdef	DEBUG
	/* XXX: Revisit ASSERT vs PANIC */
	p = fc_find_node(child, hp);
	ASSERT(p == NULL);
#endif

	p = fc_find_node(parent, hp);
	ASSERT(p != NULL);

	q->peer = p->child;
	p->child = q;

	mutex_exit(&fc_device_tree_lock);
}

void
fc_remove_child(dev_info_t *child, struct fc_device_tree *head)
{
	struct fc_device_tree *p, *c, *n;
	dev_info_t *parent = ddi_get_parent(child);

	mutex_enter(&fc_device_tree_lock);

	p = fc_find_node(parent, head);
	ASSERT(p != NULL);

	/*
	 * Find the child within the parent's subtree ...
	 */
	c = fc_find_node(child, p);
	ASSERT(c != NULL);
	ASSERT(c->child == NULL);

	/*
	 * If it's the first child, remove it, otherwise
	 * remove it from the child's peer list.
	 */
	if (p->child == c) {
		p->child = c->peer;
	} else {
		int found = 0;
		for (n = p->child; n->peer; n = n->peer) {
			if (n->peer == c) {
				n->peer = c->peer;
				found = 1;
				break;
			}
		}
		if (!found)
			cmn_err(CE_PANIC, "fc_remove_child: not found\n");
	}
	mutex_exit(&fc_device_tree_lock);

	kmem_free(c, sizeof (struct fc_device_tree));
}

dev_info_t *
fc_child_node(dev_info_t *parent, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;
	dev_info_t *dip = NULL;

	mutex_enter(&fc_device_tree_lock);
	p = fc_find_node(parent, hp);
	if (p && p->child)
		dip = p->child->dip;
	mutex_exit(&fc_device_tree_lock);

	return (dip);
}

dev_info_t *
fc_peer_node(dev_info_t *devi, struct fc_device_tree *hp)
{
	struct fc_device_tree *p;
	dev_info_t *dip = NULL;

	mutex_enter(&fc_device_tree_lock);
	p = fc_find_node(devi, hp);
	if (p && p->peer)
		dip = p->peer->dip;
	mutex_exit(&fc_device_tree_lock);

	return (dip);
}
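
/*
 * Illustrative sketch (kept out of the build): a preorder walk of the
 * shadow device tree using only the child/peer accessors above, in the
 * same style fcode uses to walk OBP-like trees.
 */
#ifdef	notdef
static void
example_walk_subtree(dev_info_t *dip, struct fc_device_tree *hp)
{
	while (dip != NULL) {
		dev_info_t *child;

		/* ... visit dip here ... */

		if ((child = fc_child_node(dip, hp)) != NULL)
			example_walk_subtree(child, hp);
		dip = fc_peer_node(dip, hp);
	}
}
#endif	/* notdef */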